[
  {
    "path": "Dockerfile",
    "content": "FROM python:3.7.3-slim-stretch\n\nRUN pip3 install prompt_toolkit==2.0.9 \\\n#\t\thttps://github.com/lief-project/packages/raw/lief-master-latest/pylief-0.9.0.dev.zip \\\n\t\tcapstone \\\n        https://github.com/JonathanSalwan/ROPgadget/archive/v5.9.zip \\\n\t&& apt-get update && apt-get install -y --no-install-recommends \\\n#\t\tg++ \\\n\t\tlibmagic1 \\\n        make \\\n        libcapstone-dev \\\n\t&& rm -rf /var/lib/apt/lists/* /root/.cache\n\nCOPY . ropium/\n# At the expense of a larger image size, recompilation of ropium can be\n# performed without reinstalling g++ (and thus without an active internet\n# connection) by uncommenting the previous g++ installation and removing the\n# apt-get commands of the following RUN\nRUN apt-get update && apt-get install -y --no-install-recommends g++ \\\n\t&& cd ropium && make && make test && make install \\\n\t&& cd .. && rm -rf ropium \\\n\t&& apt-get -y remove g++ \\\n\t&& apt-get purge -y --autoremove \\\n\t&& rm -rf /var/lib/apt/lists/*\n\nENTRYPOINT [\"ropium\"]\n"
  },
  {
    "path": "Makefile",
    "content": "CC = gcc\nCXX = g++\n\nOUTDIR = ./bin\nLIB_FILE = libropium.so\nLIB_HEADER_FILE = ropium.hpp\nBINDINGS_FILE = ropium.so\n\n## Basic default flags \nCFLAGS ?=\nCXXFLAGS ?=\nCXXFLAGS ?=\nLDFLAGS ?=\nLDLIBS ?=\nLDLIBS += -lcapstone\n\n## Flags for debug mode\nDEBUG ?= 0\nifeq ($(DEBUG), 1)\n\tCFLAGS += -g -O0\n\tCXXFLAGS += -g -O0\n\tLDFLAGS += -g\nelse\n\tCFLAGS += -O2\n\tCXXFLAGS += -O2 -Wno-narrowing\nendif\n\n## Bindings\nBINDINGS ?= 1\nifeq ($(BINDINGS), 1)\n\tCXXFLAGS += `python3-config --cflags` -DPYTHON_BINDINGS -Ibindings/python\n\tBINDINGS_DIR = ./bindings\n\tBINDINGS_SRCS = $(wildcard $(BINDINGS_DIR)/*.cpp)\n\tBINDINGS_OBJS = $(BINDINGS_SRCS:.cpp=.o)\n\tBINDINGS_RULE = bindings\n\tLDLIBS += `python3-config --libs`\nelse\n\tBINDINGS_RULE = \nendif\n\nSRCDIR=./libropium\n\n## Final C++ flags\nCXXFLAGS += -std=c++11 -fpermissive -fPIC -I $(SRCDIR)/include -I $(SRCDIR)/dependencies/murmur3 -Wno-write-strings -Wno-sign-compare -Wno-reorder\n\n# Source files\nSRCS=$(wildcard $(SRCDIR)/symbolic/*.cpp)\nSRCS+=$(wildcard $(SRCDIR)/ir/*.cpp)\nSRCS+=$(wildcard $(SRCDIR)/arch/*.cpp)\nSRCS+=$(wildcard $(SRCDIR)/ropchain/*.cpp)\nSRCS+=$(wildcard $(SRCDIR)/utils/*.cpp)\nSRCS+=$(wildcard $(SRCDIR)/database/*.cpp)\nSRCS+=$(wildcard $(SRCDIR)/compiler/*.cpp)\nOBJS=$(SRCS:.cpp=.o)\n\nTESTDIR = ./tests\nTESTSRCS = $(wildcard $(TESTDIR)/*.cpp)\nTESTOBJS = $(TESTSRCS:.cpp=.o)\n\nDEPDIR = $(SRCDIR)/dependencies\nDEPSRCS = $(DEPDIR)/murmur3/murmur3.c \nDEPOBJS = $(DEPSRCS:.c=.o)\n\nINCLUDEDIR = $(SRCDIR)/include\n\n# Compile lib and tests \nall: lib tests $(BINDINGS_RULE)\n\n# librop\nlib: $(OBJS) $(DEPOBJS)\n\t$(CXX) $(CXXFLAGS) $(LDFLAGS) -o $(OUTDIR)/$(LIB_FILE) -shared $(OBJS) $(DEPOBJS) $(LDLIBS)\n\n# unit tests\ntests: $(TESTOBJS) $(OBJS) $(DEPOBJS)\n\t$(CXX) $(CXXFLAGS) $(LDFLAGS) -o $(OUTDIR)/tests $(TESTOBJS) $(OBJS) $(DEPOBJS) $(LDLIBS)\n\n# bindings\nbindings: $(BINDINGS_OBJS) $(OBJS) $(DEPOBJS)\n\t$(CXX) $(CXXFLAGS) $(LDFLAGS) -o 
$(OUTDIR)/$(BINDINGS_FILE) -shared $(BINDINGS_OBJS) $(OBJS) $(DEPOBJS) $(LDLIBS)\n\n# generic\n%.o : %.cpp\n\t$(CXX) $(CXXFLAGS) $(LDFLAGS) -c $< -o $@ $(LDLIBS)\n\n%.o : %.c\n\t$(CC) $(CFLAGS) $(LDFLAGS) -c $< -o $@ $(LDLIBS)\n\n# Installation (assuming Linux system) \n# If prefix not set, set default\nifeq ($(PREFIX),)\n    PREFIX = /usr\nendif\n\nINSTALL_MESSAGE_RULE=\n# Check if lib and binding files exist\nifneq (,$(wildcard ./bin/$(LIB_FILE)))\n    INSTALL_LIB_RULE=install_lib\n\tINSTALL_MESSAGE_RULE=print_install_message\nelse\n\tINSTALL_LIB_RULE=\nendif\nifneq (,$(wildcard ./bin/$(BINDINGS_FILE))) \n    INSTALL_BINDINGS_RULE=install_bindings\n    PYTHONDIR=$(shell python3 -m site --user-site)/\n\tINSTALL_MESSAGE_RULE=print_install_message\nelse\n\tINSTALL_BINDINGS_RULE=\nendif\n\n# make install command\ninstall: $(INSTALL_LIB_RULE) $(INSTALL_BINDINGS_RULE) install_cli_tool $(INSTALL_MESSAGE_RULE)\n\ninstall_lib:\n\tinstall -d $(DESTDIR)$(PREFIX)/lib/\n\tinstall -D $(OUTDIR)/$(LIB_FILE) $(DESTDIR)$(PREFIX)/lib/\n\tinstall -d $(DESTDIR)$(PREFIX)/include/\n\tinstall -D $(INCLUDEDIR)/$(LIB_HEADER_FILE) $(DESTDIR)$(PREFIX)/include/\n\ninstall_bindings:\n\tinstall -d $(PYTHONDIR)\n\tinstall -D $(OUTDIR)/$(BINDINGS_FILE) $(PYTHONDIR)\n\ninstall_cli_tool:\n\tinstall -d $(DESTDIR)$(PREFIX)/bin/\n\tinstall -D cli-tool/ropium $(DESTDIR)$(PREFIX)/bin/\n\nprint_install_message:\n\t@echo \"\\nROPium was successfully installed.\"\n\n# make test command\ntest:\n\t$(OUTDIR)/tests\n\n# cleaning \ncleanall: clean\n\nclean:\n\trm -f $(OBJS)\n\trm -f $(DEPOBJS)\n\trm -f $(TESTOBJS)\n\trm -f $(BINDINGS_OBJS)\n\trm -f `find . -type f -name \"*.gch\"`\n\trm -f $(OUTDIR)/*\n"
  },
  {
    "path": "README.md",
    "content": "\n\n<p align=\"center\" >\n<img width=50% src=\"/ressources/ropium_logo.png\"/><br /><br /><br />\n</p>\n\n# About\n**ROPium** (ex-ROPGenerator) is a library/tool that makes ROP-exploits easy. It automatically extracts and analyses gadgets from binaries and\nlets you find ROP-chains with semantic queries. ROPium supports *X86* and *X64* architectures, soon to be \nextended with *ARM*.\n\nKey features:\n\n   - **Effortless**: ROPium works out-of-the-box with a smooth Command Line Interface\n   - **Python API**: It is easy to integrate ROPium in script thanks to its python API\n   - **Automatic chaining**: ROPium automatically combines gadgets to create complex ROP-chains\n   - **Advanced features**: ROPium supports function calls for various ABIs, syscalls, ...\n   - **Semantic queries**: ROPium queries are quick and convenient to write : ``rax=rbx+8``, ``[rdi+0x20]=rax``, ``rsi=[rbx+16]``, ``0x08040212(1, 2, rax)``, ``[0xdeadbeaf] = \"/bin/sh\\x00\"``, ``sys_execve(0xdeadbeef, 0, 0)``, ``sys_0x1(0)``, ``...``\n\n\n# Content\n- [About](#about)\n- [Installation](#installation)\n- [Getting started](#getting-started)\n   - [CLI tool](#cli-tool)\n   - [Python API](#python-api)\n- [Docker](#docker)\n- [Contact](#contact)\n- [Licence](#licence)\n- [Special thanks](#special-thanks)\n\n# Installation\nFirst install the [Capstone](https://github.com/aquynh/capstone) disassembly framework: \n\n      sudo apt-get install libcapstone-dev\n\nYou also need the latest [ROPgadget](https://github.com/JonathanSalwan/ROPgadget) release: \n\n      git clone https://github.com/JonathanSalwan/ROPgadget && cd ROPgadget\n      python setup.py install --user \n\nTo use the CLI tool, install [prompt_toolkit](https://github.com/prompt-toolkit/python-prompt-toolkit):\n      \n      pip3 install prompt_toolkit\n\nFinally install **ROPium**:\n\n      git clone https://github.com/Boyan-MILANOV/ropium && cd ropium\n      make\n      make test\n      sudo make install \n\n# Getting 
started\n\n### CLI tool\n\nThanks to a Command-Line-Interface wrapper, you can use ROPium interactively to quickly build ropchains:  \n\n<p align=\"center\">\n  <img src=\"/ressources/ropium.gif\" width=\"800\" align=\"middle\">\n</p>\n\n### Python API\n\nDo you need to integrate ropchains directly in your scripts ? Good news, ROPium has a python API !\n\nLoading a binary and finding ropchains:\n```Python\nfrom ropium import *\nrop = ROPium(ARCH.X64)\nrop.load('/lib/x86_64-linux-gnu/libc-2.27.so')\n\nchain = rop.compile('rbx = [rax + 0x20]')\n```\n\nDumping a ropchain in various formats:\n```Python\n>>> print( chain.dump() )\n\n0x000000000009a851 (sub rax, 0x10; ret)\n0x0000000000130018 (mov rax, qword ptr [rax + 0x30]; ret)\n0x0000000000052240 (push rax; pop rbx; ret)\n\n>>> print(chain.dump('python'))\n\nfrom struct import pack\noff = 0x0\np = ''\np += pack('<Q', 0x000000000009a851+off) # sub rax, 0x10; ret\np += pack('<Q', 0x0000000000130018+off) # mov rax, qword ptr [rax + 0x30]; ret\np += pack('<Q', 0x0000000000052240+off) # push rax; pop rbx; ret\n\n>>> print(chain.dump('raw'))\n\nb'Q\\xa8\\t\\x00\\x00\\x00\\x00\\x00\\x18\\x00\\x13\\x00\\x00\\x00\\x00\\x00@\"\\x05\\x00\\x00\\x00\\x00\\x00'\n```\n\nSet constraints on ropchains:\n```Python\n# Bytes that should not appear in the ropchain\nrop.bad_bytes = [0x00, 0x0a, 0x0b]\n\n# Register that should not be clobbered by the ropchain\nrop.keep_regs = ['rsi', 'rdx']\n\n# Enable/Forbid ropchain to dereference registers that might hold invalid addresses\n# Safe mode is 'True' by default\nrop.safe_mem = False\n\n# Specify which ABI you want to use when calling functions\nrop.abi = ABI.X86_CDECL\n\n# Specify which system to target when doing syscalls\nrop.os = OS.LINUX\n```\n\n# Docker\n\nIf needed you can run ROPium in a docker container. The container can be generated from the *Dockerfile* as\nfollows:\n\n```bash\n# Create your docker image (this will take time!)\ndocker build . 
--tag ropium\n\n# Run the image in interactive mode, bind mounting the file to analyze\ndocker run --rm -it -v /FULL/HOST/PATH/FILE:/tmp/FILE:ro ropium\n\n(ropium)> load -a X86 /tmp/FILE\n```\nThe actual image is around 200 MB based on a Debian Stretch with a Python 3.7.3 installed. \n\n# Contact\n\n**Boyan MILANOV** - boyan.milanov (at) hotmail (dot) fr\n\n# Licence\nROPium is provided under the MIT licence.\n\n# Special thanks\nContributors:\n   -  Docker container support: [migounette](https://github.com/migounette), [clslgrnc](https://github.com/clslgrnc)\n\nROPium uses the following awesome projects: \n   - [capstone](https://github.com/aquynh/capstone) : Disassembly Framework\n   - [ROPgadget](https://github.com/JonathanSalwan/ROPgadget) : Gadget extractor\n   - [prompt-toolkit](https://github.com/prompt-toolkit/python-prompt-toolkit) : Python CLI interface library\n\n"
  },
  {
    "path": "bin/.gitignore",
    "content": "# Ignore everything in this directory\n*\n# Except this file\n!.gitignore\n"
  },
  {
    "path": "bindings/py_arch.cpp",
    "content": "#include \"python_bindings.hpp\"\n#include \"arch.hpp\"\n#include \"compiler.hpp\"\n\nvoid init_arch(PyObject* module){\n    /* ARCH enum */\n    PyObject* arch_enum = PyDict_New();\n    PyDict_SetItemString(arch_enum, \"X86\", PyLong_FromLong((int)ArchType::X86));\n    PyDict_SetItemString(arch_enum, \"X64\", PyLong_FromLong((int)ArchType::X64));\n    PyDict_SetItemString(arch_enum, \"ARM32\", PyLong_FromLong((int)ArchType::ARM32));\n    PyDict_SetItemString(arch_enum, \"ARM64\", PyLong_FromLong((int)ArchType::ARM64));\n    PyObject* arch_class = create_class(PyUnicode_FromString(\"ARCH\"), PyTuple_New(0), arch_enum);\n    PyModule_AddObject(module, \"ARCH\", arch_class);\n\n    /* OS enum */\n    PyObject* os_enum = PyDict_New();\n    PyDict_SetItemString(os_enum, \"LINUX\", PyLong_FromLong((int)System::LINUX));\n    PyDict_SetItemString(os_enum, \"WINDOWS\", PyLong_FromLong((int)System::WINDOWS));\n    PyDict_SetItemString(os_enum, \"NONE\", PyLong_FromLong((int)System::NONE));\n    PyObject* os_class = create_class(PyUnicode_FromString(\"OS\"), PyTuple_New(0), os_enum);\n    PyModule_AddObject(module, \"OS\", os_class);\n\n    /* ABI enum */\n    PyObject* abi_enum = PyDict_New();\n    PyDict_SetItemString(abi_enum, \"X86_CDECL\", PyLong_FromLong((int)ABI::X86_CDECL));\n    PyDict_SetItemString(abi_enum, \"X86_STDCALL\", PyLong_FromLong((int)ABI::X86_STDCALL));\n    PyDict_SetItemString(abi_enum, \"X64_SYSTEM_V\", PyLong_FromLong((int)ABI::X64_SYSTEM_V));\n    PyDict_SetItemString(abi_enum, \"X64_MS\", PyLong_FromLong((int)ABI::X64_MS));\n    PyDict_SetItemString(abi_enum, \"NONE\", PyLong_FromLong((int)ABI::NONE));\n    PyObject* abi_class = create_class(PyUnicode_FromString(\"ABI\"), PyTuple_New(0), abi_enum);\n    PyModule_AddObject(module, \"ABI\", abi_class);\n    \n    /* X86 registers enum */\n    PyObject* x86_enum = PyDict_New();\n    PyDict_SetItemString(x86_enum, \"EAX\", PyLong_FromLong(X86_EAX));\n    PyDict_SetItemString(x86_enum, 
\"EBX\", PyLong_FromLong(X86_EBX));\n    PyDict_SetItemString(x86_enum, \"ECX\", PyLong_FromLong(X86_ECX));\n    PyDict_SetItemString(x86_enum, \"EDX\", PyLong_FromLong(X86_EDX));\n    PyDict_SetItemString(x86_enum, \"EDI\", PyLong_FromLong(X86_EDI));\n    PyDict_SetItemString(x86_enum, \"ESI\", PyLong_FromLong(X86_ESI));\n    PyDict_SetItemString(x86_enum, \"EBP\", PyLong_FromLong(X86_EBP));\n    PyDict_SetItemString(x86_enum, \"ESP\", PyLong_FromLong(X86_ESP));\n    PyDict_SetItemString(x86_enum, \"EIP\", PyLong_FromLong(X86_EIP));\n    PyDict_SetItemString(x86_enum, \"CS\", PyLong_FromLong(X86_CS));\n    PyDict_SetItemString(x86_enum, \"DS\", PyLong_FromLong(X86_DS));\n    PyDict_SetItemString(x86_enum, \"ES\", PyLong_FromLong(X86_ES));\n    PyDict_SetItemString(x86_enum, \"FS\", PyLong_FromLong(X86_FS));\n    PyDict_SetItemString(x86_enum, \"GS\", PyLong_FromLong(X86_GS));\n    PyDict_SetItemString(x86_enum, \"SS\", PyLong_FromLong(X86_SS));\n    PyDict_SetItemString(x86_enum, \"CF\", PyLong_FromLong(X86_CF));\n    PyDict_SetItemString(x86_enum, \"PF\", PyLong_FromLong(X86_PF));\n    PyDict_SetItemString(x86_enum, \"AF\", PyLong_FromLong(X86_AF));\n    PyDict_SetItemString(x86_enum, \"ZF\", PyLong_FromLong(X86_ZF));\n    PyDict_SetItemString(x86_enum, \"SF\", PyLong_FromLong(X86_SF));\n    PyDict_SetItemString(x86_enum, \"TF\", PyLong_FromLong(X86_TF));\n    PyDict_SetItemString(x86_enum, \"IF\", PyLong_FromLong(X86_IF));\n    PyDict_SetItemString(x86_enum, \"DF\", PyLong_FromLong(X86_DF));\n    PyDict_SetItemString(x86_enum, \"OF\", PyLong_FromLong(X86_OF));\n    PyDict_SetItemString(x86_enum, \"IOPL\", PyLong_FromLong(X86_IOPL));\n    PyDict_SetItemString(x86_enum, \"NT\", PyLong_FromLong(X86_NT));\n    PyDict_SetItemString(x86_enum, \"RF\", PyLong_FromLong(X86_RF));\n    PyDict_SetItemString(x86_enum, \"VM\", PyLong_FromLong(X86_VM));\n    PyDict_SetItemString(x86_enum, \"AC\", PyLong_FromLong(X86_AC));\n    PyDict_SetItemString(x86_enum, \"VIF\", 
PyLong_FromLong(X86_VIF));\n    PyDict_SetItemString(x86_enum, \"VIP\", PyLong_FromLong(X86_VIP));\n    PyDict_SetItemString(x86_enum, \"ID\", PyLong_FromLong(X86_ID));\n    PyDict_SetItemString(x86_enum, \"TSC\", PyLong_FromLong(X86_TSC));\n    PyDict_SetItemString(x86_enum, \"NB_REGS\", PyLong_FromLong(X86_NB_REGS));\n    PyObject* x86_class = create_class(PyUnicode_FromString(\"X86\"), PyTuple_New(0), x86_enum);\n    PyModule_AddObject(module, \"X86\", x86_class);\n    \n    /* X64 registers enum */\n    PyObject* x64_enum = PyDict_New();\n    PyDict_SetItemString(x64_enum, \"RAX\", PyLong_FromLong(X64_RAX));\n    PyDict_SetItemString(x64_enum, \"RBX\", PyLong_FromLong(X64_RBX));\n    PyDict_SetItemString(x64_enum, \"RCX\", PyLong_FromLong(X64_RCX));\n    PyDict_SetItemString(x64_enum, \"RDX\", PyLong_FromLong(X64_RDX));\n    PyDict_SetItemString(x64_enum, \"RDI\", PyLong_FromLong(X64_RDI));\n    PyDict_SetItemString(x64_enum, \"RSI\", PyLong_FromLong(X64_RSI));\n    PyDict_SetItemString(x64_enum, \"RBP\", PyLong_FromLong(X64_RBP));\n    PyDict_SetItemString(x64_enum, \"RSP\", PyLong_FromLong(X64_RSP));\n    PyDict_SetItemString(x64_enum, \"RIP\", PyLong_FromLong(X64_RIP));\n    PyDict_SetItemString(x64_enum, \"R8\", PyLong_FromLong(X64_R8));\n    PyDict_SetItemString(x64_enum, \"R9\", PyLong_FromLong(X64_R9));\n    PyDict_SetItemString(x64_enum, \"R10\", PyLong_FromLong(X64_R10));\n    PyDict_SetItemString(x64_enum, \"R11\", PyLong_FromLong(X64_R11));\n    PyDict_SetItemString(x64_enum, \"R12\", PyLong_FromLong(X64_R12));\n    PyDict_SetItemString(x64_enum, \"R13\", PyLong_FromLong(X64_R13));\n    PyDict_SetItemString(x64_enum, \"R14\", PyLong_FromLong(X64_R14));\n    PyDict_SetItemString(x64_enum, \"R15\", PyLong_FromLong(X64_R15));\n    PyDict_SetItemString(x64_enum, \"CS\", PyLong_FromLong(X64_CS));\n    PyDict_SetItemString(x64_enum, \"DS\", PyLong_FromLong(X64_DS));\n    PyDict_SetItemString(x64_enum, \"ES\", PyLong_FromLong(X64_ES));\n    
PyDict_SetItemString(x64_enum, \"FS\", PyLong_FromLong(X64_FS));\n    PyDict_SetItemString(x64_enum, \"GS\", PyLong_FromLong(X64_GS));\n    PyDict_SetItemString(x64_enum, \"SS\", PyLong_FromLong(X64_SS));\n    PyDict_SetItemString(x64_enum, \"CF\", PyLong_FromLong(X64_CF));\n    PyDict_SetItemString(x64_enum, \"PF\", PyLong_FromLong(X64_PF));\n    PyDict_SetItemString(x64_enum, \"AF\", PyLong_FromLong(X64_AF));\n    PyDict_SetItemString(x64_enum, \"ZF\", PyLong_FromLong(X64_ZF));\n    PyDict_SetItemString(x64_enum, \"SF\", PyLong_FromLong(X64_SF));\n    PyDict_SetItemString(x64_enum, \"TF\", PyLong_FromLong(X64_TF));\n    PyDict_SetItemString(x64_enum, \"IF\", PyLong_FromLong(X64_IF));\n    PyDict_SetItemString(x64_enum, \"DF\", PyLong_FromLong(X64_DF));\n    PyDict_SetItemString(x64_enum, \"OF\", PyLong_FromLong(X64_OF));\n    PyDict_SetItemString(x64_enum, \"IOPL\", PyLong_FromLong(X64_IOPL));\n    PyDict_SetItemString(x64_enum, \"NT\", PyLong_FromLong(X64_NT));\n    PyDict_SetItemString(x64_enum, \"RF\", PyLong_FromLong(X64_RF));\n    PyDict_SetItemString(x64_enum, \"VM\", PyLong_FromLong(X64_VM));\n    PyDict_SetItemString(x64_enum, \"AC\", PyLong_FromLong(X64_AC));\n    PyDict_SetItemString(x64_enum, \"VIF\", PyLong_FromLong(X64_VIF));\n    PyDict_SetItemString(x64_enum, \"VIP\", PyLong_FromLong(X64_VIP));\n    PyDict_SetItemString(x64_enum, \"ID\", PyLong_FromLong(X64_ID));\n    PyDict_SetItemString(x64_enum, \"TSC\", PyLong_FromLong(X64_TSC));\n    PyDict_SetItemString(x64_enum, \"NB_REGS\", PyLong_FromLong(X64_NB_REGS));\n    PyObject* x64_class = create_class(PyUnicode_FromString(\"X64\"), PyTuple_New(0), x64_enum);\n    PyModule_AddObject(module, \"X64\", x64_class);\n};\n"
  },
  {
    "path": "bindings/py_module.cpp",
    "content": "#include \"Python.h\"\n#include \"python_bindings.hpp\"\n\n/* Module methods */\nPyMethodDef module_methods[] = {\n    {\"ROPium\", (PyCFunction)ropium_ROPium, METH_VARARGS, \"Create a new ROPium instance\"},\n    {NULL}\n};\n\n/* Module information */\nPyModuleDef ropium_module_def = {\n    PyModuleDef_HEAD_INIT,\n    \"ropium\",\n    nullptr,\n    -1,      // m_size\n    module_methods, // m_methods\n    nullptr, // m_slots\n    nullptr, // m_traverse\n    nullptr, // m_clear\n    nullptr  // m_free    \n};\n\nPyMODINIT_FUNC PyInit_ropium(){\n    Py_Initialize();\n    PyObject* module = PyModule_Create(&ropium_module_def);\n    \n    init_arch(module);\n    init_ropchain(module);\n    return module;\n}\n"
  },
  {
    "path": "bindings/py_ropchain.cpp",
    "content": "#include \"python_bindings.hpp\"\n#include <cstdio>\n\n/* -------------------------------------\n *          ROPChain object\n * ------------------------------------ */\n\nstatic void ropchain_dealloc(PyObject* self){\n    delete ((ropchain_Object*)self)->ropchain;  ((ropchain_Object*)self)->ropchain = nullptr;\n    Py_TYPE(self)->tp_free((PyObject *)self);\n};\n\nstatic PyObject* ropchain_str(PyObject* self){\n    stringstream ss;\n    ss << *(as_ropchain_object(self).ropchain);\n    return PyUnicode_FromString(ss.str().c_str());\n};\n\nstatic int ropchain_print(PyObject* self){\n    std::cout << *(as_ropchain_object(self).ropchain);\n    return 0;\n};\n\nstatic PyObject* ropchain_repr(PyObject* self){\n    return ropchain_str(self);\n};\n\nstatic PyObject* ropchain_dump(PyObject* self, PyObject* args, PyObject* keywords){\n    char* arg = \"pretty\"; // Default\n    string fmt;\n    stringstream ss;\n    int color = 1;\n    vector<uint8_t> raw;\n    PyObject* res;\n    char* tab = \"\";\n\n    char* keywd[] = {\"\", \"tab\", \"color\", NULL};\n\n    if( ! 
PyArg_ParseTupleAndKeywords(args, keywords, \"|ssp\", keywd, &arg, &tab, &color)){\n        return NULL;\n    }\n\n    fmt = string(arg);\n\n    if(!color){\n        disable_colors();\n    }\n\n    if( fmt == \"pretty\" ){\n        as_ropchain_object(self).ropchain->print_pretty(ss, string(tab));\n        res = PyUnicode_FromString(ss.str().c_str());\n    }else if( fmt == \"python\" ){\n        as_ropchain_object(self).ropchain->print_python(ss, string(tab));\n        res = PyUnicode_FromString(ss.str().c_str());\n    }else if( fmt == \"raw\" ){\n        as_ropchain_object(self).ropchain->dump_raw(raw);\n         // Translate vector into python bytes\n        res = PyBytes_FromStringAndSize((char*)raw.data(), raw.size());\n    }else\n        return PyErr_Format(PyExc_ValueError, \"Unknown dump format: %s\", arg);\n\n    enable_colors();\n    \n    if( res == NULL ){\n        return PyErr_Format(PyExc_RuntimeError, \"%s\", \"Failed to dump ropchain\");\n    }\n\n    return res;\n}\n\nstatic PyMethodDef ropchain_methods[] = {\n    {\"dump\", (PyCFunction)ropchain_dump, METH_VARARGS | METH_KEYWORDS, \"dump(fmt='pretty', tab='', color=True) \\nDump the ropchain in various formats. 
Available formats: 'pretty', 'python', 'raw'\"},\n    {NULL, NULL, 0, NULL}\n};\n\nstatic PyMemberDef ropchain_members[] = {\n    {NULL}\n};\n\nstatic PyNumberMethods ropchain_operators; // Empty PyNumberMethods, will be filled in the init function\n\n/* Type description for python Expr objects */\nPyTypeObject ropchain_Type = {\n    PyVarObject_HEAD_INIT(NULL, 0)\n    \"ropchain\",                         /* tp_name */\n    sizeof(ropchain_Object),            /* tp_basicsize */\n    0,                                        /* tp_itemsize */\n    (destructor)ropchain_dealloc,       /* tp_dealloc */\n    (printfunc)ropchain_print,                                        /* tp_print */\n    0,                                        /* tp_getattr */\n    0,                                        /* tp_setattr */\n    0,                                        /* tp_reserved */\n    ropchain_repr,                            /* tp_repr */\n    &ropchain_operators,                      /* tp_as_number */\n    0,                                        /* tp_as_sequence */\n    0,                                        /* tp_as_mapping */\n    0,                                        /* tp_hash  */\n    0,                                        /* tp_call */\n    ropchain_str,                                        /* tp_str */\n    0,                                        /* tp_getattro */\n    0,                                        /* tp_setattro */\n    0,                                        /* tp_as_buffer */\n    Py_TPFLAGS_DEFAULT,                       /* tp_flags */\n    \"ROPChain object\",                        /* tp_doc */\n    0,                                        /* tp_traverse */\n    0,                                        /* tp_clear */\n    0,                                        /* tp_richcompare */\n    0,                                        /* tp_weaklistoffset */\n    0,                                        /* tp_iter */\n    0,    
                                    /* tp_iternext */\n    ropchain_methods,                         /* tp_methods */\n    ropchain_members,                         /* tp_members */\n    0,                                        /* tp_getset */\n    0,                                        /* tp_base */\n    0,                                        /* tp_dict */\n    0,                                        /* tp_descr_get */\n    0,                                        /* tp_descr_set */\n    0,                                        /* tp_dictoffset */\n    0,                                        /* tp_init */\n    0,                                        /* tp_alloc */\n    0,                                        /* tp_new */\n};\n\nPyObject* get_ropchain_Type(){\n    return (PyObject*)&ropchain_Type;\n};\n\n/* Constructor */\nPyObject* Pyropchain_FromROPChain(ROPChain* chain){\n    ropchain_Object* object;\n\n    // Create object\n    PyType_Ready(&ropchain_Type);\n    object = PyObject_New(ropchain_Object, &ropchain_Type);\n    if( object != nullptr ){\n        object->ropchain = chain;\n    }\n    return (PyObject*)object;\n}\n\n// Adding two ropchains\n/* Number methods & Various Constructors */\nstatic PyObject* ropchain_nb_add(PyObject* self, PyObject *other){\n    if( ! PyObject_IsInstance(other, (PyObject*)&(ropchain_Type))){\n        return PyErr_Format(PyExc_TypeError, \"Mismatching types for operator '+'\");\n    }\n    ROPChain * rop = new ROPChain(as_ropchain_object(self).ropchain->arch);\n    rop->add_chain(*(as_ropchain_object(self).ropchain));\n    rop->add_chain(*(as_ropchain_object(other).ropchain));\n    return Pyropchain_FromROPChain(rop);\n}\n\n/* -------------------------------------\n *          Init function\n * ------------------------------------ */\nvoid init_ropchain(PyObject* module){\n    /* Add number operators to ropchain */\n    ropchain_operators.nb_add = ropchain_nb_add;\n}\n"
  },
  {
    "path": "bindings/py_ropium.cpp",
    "content": "#include \"python_bindings.hpp\"\n#include <cstdio>\n#include <fstream>\n\n/* -------------------------------------\n *          ROPium object\n * ------------------------------------ */\n\nstatic void ROPium_dealloc(PyObject* self){\n    delete ((ROPium_Object*)self)->compiler;  ((ROPium_Object*)self)->compiler = nullptr;\n    delete ((ROPium_Object*)self)->arch;  ((ROPium_Object*)self)->arch = nullptr;\n    delete ((ROPium_Object*)self)->gadget_db;  ((ROPium_Object*)self)->gadget_db = nullptr;\n    delete ((ROPium_Object*)self)->constraint;  ((ROPium_Object*)self)->constraint = nullptr;\n    Py_TYPE(self)->tp_free((PyObject *)self);\n};\n\nstatic PyObject* ROPium_load(PyObject* self, PyObject* args){\n    const char* filename;\n    int filename_len;\n    int filenum = 0;\n    stringstream ss;\n    string gadget_file; \n    string ropgadget_tmp_file;\n    int max_filenum = 0x7fffffff; \n\n    vector<RawGadget>* raw = nullptr;\n\n    if( ! PyArg_ParseTuple(args, \"s#\", &filename, &filename_len) ){\n        return NULL;\n    }\n\n    // Get available file to dump gadgets\n    for( filenum = 0; filenum < max_filenum; filenum++){\n        ss.str(\"\");\n        ss << \".ropium_raw_gadgets.\" << filenum;\n        gadget_file = ss.str();\n        // Test if file exists\n        std::ifstream fin(gadget_file);\n        if( !fin ){\n            break; // File doesn't exist\n        }else{\n            fin.close(); // Try next filenum\n        }\n    }\n\n    if( filenum == max_filenum ){\n        return PyErr_Format(PyExc_RuntimeError, \"Couldn't create new file where to dump gadgets\");\n    }\n\n    ss.str(\"\");\n        ss << \".ropgadget_output.\" << filenum,\n        ropgadget_tmp_file = ss.str();\n\n    try{\n        // Try to load binary and get gadgets using ROPgadget for now\n        if( ! 
ropgadget_to_file(gadget_file, ropgadget_tmp_file, filename)){\n            return PyErr_Format(PyExc_RuntimeError, \"Couldn't analyse binary with ROPgadget\");\n        }\n        raw = raw_gadgets_from_file(gadget_file);\n        as_ropium_object(self).gadget_db->analyse_raw_gadgets(*raw, as_ropium_object(self).arch);\n        delete raw; raw = nullptr;\n        remove(gadget_file.c_str());\n        remove(ropgadget_tmp_file.c_str());\n    }catch(runtime_exception& e){\n        return PyErr_Format(PyExc_RuntimeError, \"%s\", e.what());\n    }\n\n    Py_RETURN_NONE;\n};\n\nstatic PyObject* ROPium_compile(PyObject* self, PyObject* args){\n    const char* query;\n    int query_len;\n    ROPChain* ropchain;\n\n    if( ! PyArg_ParseTuple(args, \"s#\", &query, &query_len) ){\n        return NULL;\n    }\n\n    try{\n        ropchain = as_ropium_object(self).compiler->compile( string(query, query_len), \n                as_ropium_object(self).constraint, as_ropium_object(self).abi, as_ropium_object(self).system); \n        if( ropchain ){\n            return Pyropchain_FromROPChain(ropchain);\n        }\n    }catch(il_exception& e){\n        return PyErr_Format(PyExc_ValueError, \"%s\", e.what());\n    }catch(runtime_exception& e){\n        return PyErr_Format(PyExc_RuntimeError, \"%s\", e.what());\n    }catch(compiler_exception& e){\n        return PyErr_Format(PyExc_RuntimeError, \"%s\", e.what());\n    }\n\n    Py_RETURN_NONE;\n};\n\nstatic PyMethodDef ROPium_methods[] = {\n    {\"load\", (PyCFunction)ROPium_load, METH_VARARGS, \"load(<filename>) \\nLoad and analyse gadgets from a binary\"},\n    {\"compile\", (PyCFunction)ROPium_compile, METH_VARARGS, \"compile(<query>) \\nCompile a semantic query into a ropchain\"},\n    {NULL, NULL, 0, NULL}\n};\n\n// Get/Set Attributes\nstatic PyObject* ROPium_get_bad_bytes(PyObject* self, void* closure){\n    PyObject* list;\n    \n    list = PyList_New(0);\n    if( list == NULL ){\n        return 
PyErr_Format(PyExc_RuntimeError, \"%s\", \"Failed to create new python list\");\n    }\n    // Add bad bytes to list\n    for (int i = 0; i < 0x100; i++){\n        if( !as_ropium_object(self).constraint->bad_bytes.is_valid_byte(i) ){\n            if( PyList_Append(list, PyLong_FromLong(i)) == -1){\n                return PyErr_Format(PyExc_RuntimeError, \"%s\", \"Failed to add bad byte to python list\");\n            }\n        }\n    }\n    return list;\n}\n\nstatic int ROPium_set_bad_bytes(PyObject* self, PyObject* list, void* closure){\n    PyObject *item;\n    Py_ssize_t size;\n\n    if( ! PyList_Check(list)){\n        PyErr_SetString(PyExc_RuntimeError, \"Expected a list of integers\");\n        return -1;\n    }\n\n    size = PyList_Size(list);\n    \n    // Clear previous bad bytes\n    as_ropium_object(self).constraint->bad_bytes.clear();\n    \n    // Add new bad bytes\n    for( int i = 0; i < size; i++){\n        item = PyList_GetItem(list, i);\n        if( item == NULL ){\n            PyErr_SetString(PyExc_RuntimeError, \"Error getting item in supplied list\");\n            return -1;\n        }\n        if( ! PyLong_Check(item) || PyLong_AsUnsignedLong(item) > 0xff ){\n            PyErr_SetString(PyExc_ValueError, \"Bad bytes list has incorrect element(s)\");\n            return -1;\n        }\n        // Add bad byte\n        as_ropium_object(self).constraint->bad_bytes.add_bad_byte(PyLong_AsUnsignedLong(item));\n    }\n\n    return 0;\n}\n\nstatic PyObject* ROPium_get_safe_mem(PyObject* self, void* closure){\n    \n    if( as_ropium_object(self).constraint->mem_safety.is_enforced())\n        Py_RETURN_TRUE;\n    else\n        Py_RETURN_FALSE;\n}\n\nstatic int ROPium_set_safe_mem(PyObject* self, PyObject* val, void* closure){\n\n    if( ! 
PyBool_Check(val)){\n        PyErr_SetString(PyExc_RuntimeError, \"Expected a boolean value\");\n        return -1;\n    }\n    \n    if( val == Py_True ){\n        as_ropium_object(self).constraint->mem_safety.force_safe();\n    }else{\n        as_ropium_object(self).constraint->mem_safety.enable_unsafe();\n    }\n\n    return 0;\n}\n\nstatic PyObject* ROPium_get_keep_regs(PyObject* self, void* closure){\n    PyObject* list;\n    \n    list = PyList_New(0);\n    if( list == NULL ){\n        return PyErr_Format(PyExc_RuntimeError, \"%s\", \"Failed to create new python list\");\n    }\n    // Add kept register names to list\n    for (int i = 0; i < as_ropium_object(self).arch->nb_regs; i++){\n        if( as_ropium_object(self).constraint->keep_regs.is_kept(i) ){\n            if( PyList_Append(list, PyUnicode_FromString( as_ropium_object(self).arch->reg_name(i).c_str())) == -1){\n                return PyErr_Format(PyExc_RuntimeError, \"%s\", \"Failed to add register name to python list\");\n            }\n        }\n    }\n    return list;\n}\n\nstatic int ROPium_set_keep_regs(PyObject* self, PyObject* list, void* closure){\n    PyObject *item;\n    Py_ssize_t size;\n    string name;\n    int reg_num;\n\n    if( ! PyList_Check(list)){\n        PyErr_SetString(PyExc_RuntimeError, \"Expected a list of str\");\n        return -1;\n    }\n\n    size = PyList_Size(list);\n    \n    // Clear previous regs\n    as_ropium_object(self).constraint->keep_regs.clear();\n\n    // Add new regs\n    for( int i = 0; i < size; i++){\n        item = PyList_GetItem(list, i);\n        if( item == NULL ){\n            PyErr_SetString(PyExc_RuntimeError, \"Error getting item in supplied list\");\n            return -1;\n        }\n        if( ! 
PyUnicode_Check(item) ){\n            PyErr_SetString(PyExc_ValueError, \"Registers must be specified as strings: 'eax', 'ebx', ...\");\n            return -1;\n        }\n        name = string((char*)PyUnicode_DATA(item));\n        try{\n            reg_num = as_ropium_object(self).arch->reg_num(name);\n        }catch(runtime_exception& e){\n            PyErr_Format(PyExc_ValueError, \"Invalid register: %s\", name.c_str());\n            return -1;\n        }\n        // Add keep reg\n        as_ropium_object(self).constraint->keep_regs.add_keep_reg(reg_num);\n    }\n\n    return 0;\n}\n\nstatic PyObject* ROPium_get_arch(PyObject* self, void* closure){\n    return PyLong_FromLong((int)(as_ropium_object(self).arch->type));\n}\n\nstatic PyObject* ROPium_get_abi(PyObject* self, void* closure){\n    return PyLong_FromLong((int)(as_ropium_object(self).abi));\n}\n\nstatic int ROPium_set_abi(PyObject* self, PyObject* val, void* closure){\n    int abi;\n\n    if( ! PyLong_Check(val)){\n        PyErr_SetString(PyExc_RuntimeError, \"Argument should be a ABI.* enum value\");\n        return -1;\n    }\n\n    abi = PyLong_AsLong(val);\n    as_ropium_object(self).abi = (ABI)abi;\n\n    return 0;\n}\n\nstatic PyObject* ROPium_get_os(PyObject* self, void* closure){\n    return PyLong_FromLong((int)(as_ropium_object(self).system));\n}\n\nstatic int ROPium_set_os(PyObject* self, PyObject* val, void* closure){\n    int system;\n\n    if( ! 
PyLong_Check(val)){\n        PyErr_SetString(PyExc_RuntimeError, \"Argument should be a OS.* enum value\");\n        return -1;\n    }\n\n    system = PyLong_AsLong(val);\n    as_ropium_object(self).system = (System)system;\n\n    return 0;\n}\n\n\nstatic PyGetSetDef ROPium_getset[] = {\n    {\"bad_bytes\", ROPium_get_bad_bytes, ROPium_set_bad_bytes, \"Bad bytes that must not occur in the ropchains\", NULL},\n    {\"keep_regs\", ROPium_get_keep_regs, ROPium_set_keep_regs, \"Registers that should not be clobbered by the ropchains\", NULL},\n    {\"safe_mem\", ROPium_get_safe_mem, ROPium_set_safe_mem, \"Indicates whether ropchains can contain gadgets that perform potentially unsafe register dereferencing\", NULL},\n    {\"arch\", ROPium_get_arch, NULL, \"Architecture type\", NULL},\n    {\"abi\", ROPium_get_abi, ROPium_set_abi, \"ABI to use when calling functions\", NULL},\n    {\"os\", ROPium_get_os, ROPium_set_os, \"OS to target when doing syscalls\", NULL},\n    {NULL}\n};\n\n\nstatic PyMemberDef ROPium_members[] = {\n    {NULL}\n};\n\n\n/* Type description for python Expr objects */\nPyTypeObject ROPium_Type = {\n    PyVarObject_HEAD_INIT(NULL, 0)\n    \"ROPium\",                         /* tp_name */\n    sizeof(ROPium_Object),            /* tp_basicsize */\n    0,                                        /* tp_itemsize */\n    (destructor)ROPium_dealloc,       /* tp_dealloc */\n    0,                                        /* tp_print */\n    0,                                        /* tp_getattr */\n    0,                                        /* tp_setattr */\n    0,                                        /* tp_reserved */\n    0,                                        /* tp_repr */\n    0,                                        /* tp_as_number */\n    0,                                        /* tp_as_sequence */\n    0,                                        /* tp_as_mapping */\n    0,                                        /* tp_hash  */\n    0,            
                            /* tp_call */\n    0,                                        /* tp_str */\n    0,                                        /* tp_getattro */\n    0,                                        /* tp_setattro */\n    0,                                        /* tp_as_buffer */\n    Py_TPFLAGS_DEFAULT,                       /* tp_flags */\n    \"ROPium: automatic ropchain finder\",      /* tp_doc */\n    0,                                        /* tp_traverse */\n    0,                                        /* tp_clear */\n    0,                                        /* tp_richcompare */\n    0,                                        /* tp_weaklistoffset */\n    0,                                        /* tp_iter */\n    0,                                        /* tp_iternext */\n    ROPium_methods,                           /* tp_methods */\n    ROPium_members,                           /* tp_members */\n    ROPium_getset,                            /* tp_getset */\n    0,                                        /* tp_base */\n    0,                                        /* tp_dict */\n    0,                                        /* tp_descr_get */\n    0,                                        /* tp_descr_set */\n    0,                                        /* tp_dictoffset */\n    0,                                        /* tp_init */\n    0,                                        /* tp_alloc */\n    0,                                        /* tp_new */\n};\n\nPyObject* get_ROPium_Type(){\n    return (PyObject*)&ROPium_Type;\n};\n\n/* Constructor */\nPyObject* ropium_ROPium(PyObject* self, PyObject* args){\n    ROPium_Object* object;\n    int arch;\n\n    // Parse arguments\n    if( ! 
PyArg_ParseTuple(args, \"i\", &arch) ){\n        return NULL;\n    }\n\n    // Create object\n    try{\n        PyType_Ready(&ROPium_Type);\n        object = PyObject_New(ROPium_Object, &ROPium_Type);\n        if( object != nullptr ){\n            // Set constraint\n            object->constraint = new Constraint();\n            // Set architecture\n            switch ( (ArchType)arch){\n                case ArchType::X86: as_ropium_object(object).arch = new ArchX86(); break;\n                case ArchType::X64: as_ropium_object(object).arch = new ArchX64(); break;\n                default: return PyErr_Format(PyExc_ValueError, \"This architecture isn't supported yet\");\n            }\n            // Set gadget db\n            as_ropium_object(object).gadget_db = new GadgetDB();\n            // Set compiler\n            as_ropium_object(object).compiler = new ROPCompiler(object->arch, (object->gadget_db));\n            as_ropium_object(object).abi = ABI::NONE;\n            as_ropium_object(object).system = System::NONE;\n        }\n    }catch(runtime_exception& e){\n        return PyErr_Format(PyExc_RuntimeError, \"%s\", e.what());\n    }\n    return (PyObject*)object;\n}\n"
  },
  {
    "path": "bindings/python_bindings.hpp",
    "content": "#ifndef PYTHON_BINDINGS_INCLUDE_H\n#define PYTHON_BINDINGS_INCLUDE_H\n\n#include \"Python.h\"\n#include \"structmember.h\"\n#include \"exception.hpp\"\n#include \"arch.hpp\"\n#include \"database.hpp\"\n#include \"compiler.hpp\"\n#include \"systems.hpp\"\n\n/* -------------------------------------------------\n *                     Utils\n * ------------------------------------------------- */\n\nPyObject* create_class(PyObject* name, PyObject* bases, PyObject* dict);\n\n/* --------------------------------------------------\n *                   Arch\n *  -------------------------------------------------- */\n\nvoid init_arch(PyObject* module);\n\n/* --------------------------------------------------\n *                   Ropium\n *  -------------------------------------------------- */\n\ntypedef struct{\n    PyObject_HEAD\n    Arch* arch;\n    GadgetDB* gadget_db;\n    ROPCompiler* compiler;\n    Constraint* constraint;\n    ABI abi;\n    System system;\n} ROPium_Object;\nPyObject* get_ROPium_Type();\nPyObject* ropium_ROPium(PyObject* self, PyObject* args);\n#define as_ropium_object(x)  (*((ROPium_Object*)x))\n\n/* --------------------------------------------------\n *                   ROPChain\n *  -------------------------------------------------- */\n\nvoid init_ropchain(PyObject* module);\n\ntypedef struct{\n    PyObject_HEAD\n    ROPChain* ropchain;\n} ropchain_Object;\nPyObject* get_ropchain_Type();\nPyObject* Pyropchain_FromROPChain(ROPChain* chain);\n#define as_ropchain_object(x)  (*((ropchain_Object*)x))\n\n\n#endif\n"
  },
  {
    "path": "bindings/utils.cpp",
    "content": "#include \"python_bindings.hpp\"\n\nPyObject* create_class(PyObject* name, PyObject* bases, PyObject* dict){\n    PyObject* res = PyObject_CallFunctionObjArgs((PyObject*)&PyType_Type, name, bases, dict, NULL);\n    Py_CLEAR(name);\n    Py_CLEAR(bases);\n    Py_CLEAR(dict);\n    return res;\n}\n"
  },
  {
    "path": "cli-tool/ropium",
    "content": "#!/usr/bin/env python3\nfrom ropium import *\nfrom prompt_toolkit import PromptSession, ANSI\nimport os\n\n# Colors and util functions\nMAIN_COLOR_ANSI = '\\033[92m'    # Default color \nERROR_COLOR_ANSI = '\\033[91m' \nBOLD_COLOR_ANSI = '\\033[1m'\nWARNING_COLOR_ANSI = '\\033[93m'\nSPECIAL_COLOR_ANSI = '\\033[93m'\nEND_COLOR_ANSI = '\\033[0m'\n\ndef str_bold(msg):\n    return BOLD_COLOR_ANSI + msg + END_COLOR_ANSI\n\ndef str_error(msg):\n    return ERROR_COLOR_ANSI + msg + END_COLOR_ANSI\n\ndef str_main(msg):\n    return MAIN_COLOR_ANSI + msg + END_COLOR_ANSI\n    \ndef str_warning(msg):\n    return WARNING_COLOR_ANSI + msg + END_COLOR_ANSI\n\ndef str_special(msg):\n    return SPECIAL_COLOR_ANSI + msg + END_COLOR_ANSI\n\ndef error(msg, skip=False):\n    if skip:\n        print('')\n    print(\"\\t[\" + str_bold(str_error(\"!\")) + \"] \" + msg)\n\ndef info(msg):\n    print(\"\\t[\" + str_main(\"+\") + \"] \" + msg)\n    \ndef warning(msg, skip=False):\n    if skip:\n        print('')\n    print(\"\\t[\" + str_bold(str_warning(\"!\")) + \"] \" + msg)\n\n\ncompiler = None\nbad_bytes = []\nkeep_regs = []\nsafe_mem = True\nabi = ABI.NONE\nsystem = OS.NONE\nstart_msg = \"\\n\" + str_bold(\"ROPium\") + \" - v3.2\\n\"\n\n# Commands\nCMD_HELP = \"help\"\nCMD_LOAD = \"load\"\nCMD_FIND = \"find\"\nCMD_BADBYTES = \"badbytes\"\nCMD_KEEPREGS = \"keepregs\"\nCMD_ABI = \"abi\"\nCMD_OS = \"os\"\nCMD_SAFEMEM = \"safemem\"\nCMD_EXIT = \"exit\"\n\n# Arch correspondance\nstr_to_arch = {\"X86\":ARCH.X86, \"X64\":ARCH.X64}\n\n# Main function\ndef main():\n    print(start_msg)\n    \n    finish = False\n    promptSession = PromptSession(ANSI(u\"(\"+ str_main(u\"ropium\") +u\")> \"))\n    while( not finish ):\n        try:\n            user_input = promptSession.prompt()\n            args = user_input.split()\n            argslen = len(args)\n            if( argslen > 0 ):\n                command = args[0]\n            else:\n                command = None\n              
  continue\n\n            if( command == CMD_LOAD ):\n                try:\n                    load(args[1:])\n                except LoadException as e:\n                    error(str(e), skip=True)\n            elif( command == CMD_EXIT ):\n                finish = True\n            elif( command == CMD_HELP ):\n                if len(args) > 1:\n                    if args[1] == \"load\":\n                        print(load_help)\n                    elif args[1] == \"find\":\n                        print(find_help)\n                    elif args[1] == \"badbytes\":\n                        print(badbytes_help)\n                    elif args[1] == \"keepregs\":\n                        print(keepregs_help)\n                    elif args[1] == \"safemem\":\n                        print(safemem_help)\n                    elif args[1] == \"abi\":\n                        print(abi_help)\n                    else:\n                        print(main_help)\n                else:\n                    print(main_help)\n            elif( command == CMD_FIND ):\n                try:\n                    find(args[1:])\n                except FindException as e:\n                    error(str(e), skip=True)\n            elif( command == CMD_BADBYTES ):\n                try:\n                    badbytes(args[1:])\n                except ContextException as e:\n                    error(str(e), skip=True)\n            elif( command == CMD_KEEPREGS ):\n                try:\n                    keepregs(args[1:])\n                except ContextException as e:\n                    error(str(e), skip=True)\n            elif( command == CMD_SAFEMEM ):\n                try:\n                    safemem(args[1:])\n                except ContextException as e:\n                    error(str(e), skip=True)\n            elif( command == CMD_ABI ):\n                try:\n                    cmd_abi(args[1:])\n                except ContextException as e:\n                    
error(str(e), skip=True)\n            elif( command == CMD_OS ):\n                try:\n                    cmd_os(args[1:])\n                except ContextException as e:\n                    error(str(e), skip=True)\n            else:\n                error(f\"Unknown command '{command}' (type 'help' for help)\", skip=True)\n            print('')\n        except KeyboardInterrupt:\n            pass\n        except EOFError:\n            finish = True\n    print('Thanks for using ROPium !')\n    return\n\n# Load command\nclass LoadException(Exception):\n    pass\n\ndef load(args):\n    global compiler\n    OPTIONS_ARCH = ['-a', '--arch']\n    OPTIONS_HELP = ['-h', '--help']\n    seen_arch = False\n    seen_filename = False\n    arch = None\n    filenames = []\n    compiler_was_none = False\n\n    # Parse arguments\n    if not args:\n        print(load_help)\n        return\n    i = 0\n    while i < len(args):\n        if args[i] in OPTIONS_ARCH:\n            if seen_arch:\n                raise LoadException(f\"Option '{args[i]}' can be used only one time\") \n            seen_arch = True\n            if( i+1 == len(args)):\n                raise LoadException(f\"Missing argument after {args[i]}\")\n            else:\n                arch = args[i+1]\n            i += 2\n        elif args[i] in OPTIONS_HELP:\n            print(load_help)\n            return\n        else:\n            filenames.append(args[i])\n            i += 1\n\n    # Check arguments\n    if not filenames:\n        raise LoadException(\"Missing filename\")\n    if not arch and not compiler:\n        raise LoadException(\"Missing architecture\")\n    if arch and (arch not in str_to_arch):\n        raise LoadException(f\"Unsupported architecture: {arch}\")\n\n    # Instanciate compiler if not already\n    if compiler is None:\n        compiler = ROPium(str_to_arch[arch])\n        compiler_was_none = True\n    elif compiler and arch and (str_to_arch[arch] != compiler.arch):\n        raise 
LoadException(f\"Already working on a different architecture than '{arch}'\")\n\n    loaded_at_least_one = False\n    print('') # So it's moar pretty\n    for f in filenames:\n        # Test if the file exists \n        if not os.path.isfile(f):\n            warning(f\"Skipped: {f} (file doesn't exist)\")\n        else:\n            compiler.load(f)\n            info(f\"Loaded: {f}\")\n            loaded_at_least_one = True\n\n    \n    if compiler_was_none and not loaded_at_least_one:\n        compiler = None\n\n\n# Find command\nclass FindException(Exception):\n    pass\n\ndef find(args):\n    global compiler\n    global bad_bytes\n    global keep_regs\n    global safe_mem\n    global abi\n    global system\n\n    if not compiler:\n        raise FindException(\"You must load a binary before finding ropchains\")\n\n    query = \"\".join(args)\n    compiler.bad_bytes = bad_bytes\n    compiler.keep_regs = keep_regs\n    compiler.safe_mem = safe_mem\n    compiler.abi = abi\n    compiler.os = system\n    try:\n        ropchain = compiler.compile(query)\n    except ValueError as e:\n        raise FindException(str(e))\n    except RuntimeError as e:\n        raise FindException(str(e))\n    if ropchain:\n        print('')\n        print(ropchain.dump(tab=\"\\t\"))\n    else:\n        print(\"\\n\\tNo ROPChain found.\")\n\n# Badbytes command\nclass ContextException(Exception):\n    pass\n\ndef badbytes(args):\n    global compiler\n\n    if not args:\n        print(badbytes_help)\n        return\n    \n    subcommand = args[0]\n    if subcommand == \"set\":\n        set_badbytes(args[1:])\n    elif subcommand == \"reset\":\n        reset_badbytes(args[1:])\n    else:\n        raise ContextException(f\"Unsupported action '{subcommand}'\")\n\ndef str_to_byte(s):\n    try:\n        return int(s, 10)\n    except:\n        try:\n            return int(s, 16)\n        except:\n            return None\n\n\n\ndef set_badbytes(args):\n    global bad_bytes\n    new_bad = []\n    for 
arg in args:\n        bad = str_to_byte(arg)\n        if bad is None or bad > 0xff:\n            raise ContextException(f\"'{arg}' is not a valid byte\")\n        new_bad.append(bad)\n    bad_bytes = new_bad\n\ndef reset_badbytes(args):\n    global bad_bytes\n    bad_bytes = []\n\n\n# Keepregs command\ndef keepregs(args):\n    global compiler\n\n    if not args:\n        print(keepregs_help)\n        return\n    \n    subcommand = args[0]\n    if subcommand == \"set\":\n        set_keepregs(args[1:])\n    elif subcommand == \"reset\":\n        reset_keepregs(args[1:])\n    else:\n        raise ContextException(f\"Unsupported action '{subcommand}'\")\n\nreg_map = {\n    \"eax\":X86.EAX,\n    \"ebx\":X86.EBX,\n    \"ecx\":X86.ECX,\n    \"edx\":X86.EDX,\n    \"esi\":X86.ESI,\n    \"edi\":X86.EDI,\n    \"esp\":X86.ESP,\n    \"ebp\":X86.EBP,\n    \"eip\":X86.EIP,\n    \n    \"rax\":X64.RAX,\n    \"rbx\":X64.RBX,\n    \"rcx\":X64.RCX,\n    \"rdx\":X64.RDX,\n    \"rdi\":X64.RDI,\n    \"rsi\":X64.RSI,\n    \"rsp\":X64.RSP,\n    \"rbp\":X64.RBP,\n    \"rip\":X64.RIP,\n    \"r8\":X64.R8,\n    \"r9\":X64.R9,\n    \"r10\":X64.R10,\n    \"r11\":X64.R11,\n    \"r12\":X64.R12,\n    \"r13\":X64.R13,\n    \"r14\":X64.R14,\n    \"r15\":X64.R15\n}\n\ndef str_to_reg(s):\n    if s in reg_map:\n        return reg_map[s]\n    else:\n        return None\n\ndef set_keepregs(args):\n    global keep_regs\n    new_keep = []\n    for arg in args:\n        reg = str_to_reg(arg)\n        if reg is None:\n            raise ContextException(f\"Register '{arg}' is not supported for 'keepregs'\")\n        new_keep.append(arg)\n    keep_regs = new_keep\n\ndef reset_keepregs(args):\n    global keep_regs\n    keep_regs = []\n\n\n# safemem command\ndef safemem(args):\n    global compiler\n    global safe_mem\n\n    if not args:\n        print(safemem_help)\n        return\n    \n    subcommand = args[0]\n    if subcommand == \"set\":\n        safe_mem = True\n    elif subcommand == \"unset\":\n        
safe_mem = False\n    else:\n        raise ContextException(f\"Unsupported action '{subcommand}'\")\n    \n    if len(args) > 1:\n        args_str = ' '.join(args[1:])\n        warning(f\"Extra arguments ignored: '{args_str}'\", skip=True)\n\n# ABI Command\nstr_to_abi = {\n    \"X86_CDECL\":ABI.X86_CDECL,\n    \"X86_STDCALL\":ABI.X86_STDCALL,\n    \"X64_SYSTEM_V\":ABI.X64_SYSTEM_V,\n    \"X64_MS\":ABI.X64_MS\n}\n\ndef cmd_abi(args):\n    global compiler\n\n    if not args:\n        print(abi_help)\n        return\n    \n    subcommand = args[0]\n    if subcommand == \"set\":\n        set_abi(args[1:])\n    else:\n        raise ContextException(f\"Unsupported action '{subcommand}'\")\n\ndef set_abi(args):\n    global abi\n    \n    if not args:\n        raise ContextException(f\"Missing ABI argument\")\n    \n    if args[0] not in str_to_abi:\n        raise ContextException(f\"Unsupported ABI: '{args[0]}'\")\n    else:\n        abi = str_to_abi[args[0]]\n        \n    if len(args) > 1:\n        extra_args = ' '.join(args[1:])\n        warning(f\"Extra arguments ignored: '{extra_args}'\", skip=True)\n        \n# OS Command\nstr_to_os = {\n    \"LINUX\":OS.LINUX,\n    \"WINDOWS\":OS.WINDOWS\n}\n\ndef cmd_os(args):\n    global compiler\n\n    if not args:\n        print(os_help)\n        return\n    \n    subcommand = args[0]\n    if subcommand == \"set\":\n        set_os(args[1:])\n    else:\n        raise ContextException(f\"Unsupported action '{subcommand}'\")\n\ndef set_os(args):\n    global system\n    \n    if not args:\n        raise ContextException(f\"Missing OS argument\")\n    \n    if args[0] not in str_to_os:\n        raise ContextException(f\"Unsupported Operating System: '{args[0]}'\")\n    else:\n        system = str_to_os[args[0]]\n        \n    if len(args) > 1:\n        extra_args = ' '.join(args[1:])\n        warning(f\"Extra arguments ignored: '{extra_args}'\", skip=True)\n\n# Help strings\nmain_help = str_main(str_bold('\\n\\tMain 
Commands'))\nmain_help += str_special(\"\\n\\t(For more info about a command type 'help <cmd>')\")\nmain_help += '\\n\\n\\t' + str_bold(CMD_LOAD) + ': \\t\\tload gadgets from a binary file'\nmain_help += '\\n\\t' + str_bold(CMD_FIND) + ': \\t\\tFind ropchains using semantic queries'\nmain_help += '\\n\\n\\t' + str_bold(CMD_BADBYTES) + ': \\tSet bad bytes to be avoided in ropchains'\nmain_help += '\\n\\t' + str_bold(CMD_KEEPREGS) + ': \\tSet registers that must not be clobbered'\nmain_help += '\\n\\t' + str_bold(CMD_SAFEMEM) + ': \\tEnable/Disable the use of unsafe gadgets'\nmain_help += '\\n\\t' + str_bold(CMD_ABI) + ': \\t\\tSpecify the ABI to use when calling functions'\nmain_help += '\\n\\t' + str_bold(CMD_OS) + ': \\t\\tSpecify the OS to target when doing syscalls'\nmain_help += '\\n\\n\\t' + str_bold(CMD_HELP) + ': \\t\\tshow this help'\nmain_help += '\\n\\t' + str_bold(CMD_EXIT) + ': \\t\\texit ROPium'\n\nload_help = str_main(str_bold(\"\\n\\t'load' Command\"))\nload_help += str_special(\"\\n\\t(Load gadgets from a binary file)\")\nload_help += \"\\n\\n\\t\"+str_bold(\"Usage\")+\":\\tload [OPTIONS] <filename> [<filename> ...]\"\nload_help += \"\\n\\n\\t\"+str_bold(\"Options\")+\":\"\nload_help += str_special(\"\\n\\t\\t-a,--arch <arch>\")+\"  architecture to use for gadget\" +\"\\n\\t\\t\\t\\t  disassembly/analysis\"\nload_help += \"\\n\\n\\t\"+str_bold(\"Supported achitectures\")+\": \"+', '.join([str_special(s) for s in str_to_arch])\nload_help += \"\\n\\n\\t\"+str_bold(\"Examples\")+\":\\n\\t\\tload -a X86 /bin/bash \\n\\t\\tload -a X64 ../my_binary1 ../my_binary2 \"\n\nfind_help = str_main(str_bold(\"\\n\\t'find' Command\"))\nfind_help += str_special(\"\\n\\t(Automatically find ropchains)\")\nfind_help += \"\\n\\n\\t\"+str_bold(\"Usage\")+\":\\tfind <query>\"\nfind_help += \"\\n\\n\\t\"+str_bold(\"Query examples\")+\":\\n\"\nfind_help += \"\\n\\t eax = 0x42\"\nfind_help += \"\\n\\t eax = ebx\"\nfind_help += \"\\n\\t eax = ebx ^ 3\"\nfind_help += \"\\n\\t 
eax = ebx & ecx\"\nfind_help += \"\\n\\t eax = [ebx + 16] \"\nfind_help += \"\\n\\t eax = [0x12345678] \"\nfind_help += \"\\n\\t eax += [ebx + 16]\"\nfind_help += \"\\n\\t eax *= [0x12345678]\"\nfind_help += \"\\n\\t [eax - 8] = ebx\"\nfind_help += \"\\n\\t [eax - 8] = 0x42\"\nfind_help += \"\\n\\t [eax - 8] &= ebx\"\nfind_help += \"\\n\\t [eax - 8] &= 0x42\"\nfind_help += \"\\n\\t [0x12345678] = ebx\"\nfind_help += \"\\n\\t [0x12345678] = 0x42\"\nfind_help += \"\\n\\t [0x12345678] &= ebx\"\nfind_help += \"\\n\\t [0x12345678] &= 0x42\"\nfind_help += \"\\n\\t [0x12345678] = '/bin/sh\\x00'\"\nfind_help += \"\\n\\t 0x08040120()\"\nfind_help += \"\\n\\t 0x08040120(1, 2, 3, 4)\"\nfind_help += \"\\n\\t sys_execve(0x1234, 0, 0) [syscall by name]\"\nfind_help += \"\\n\\t sys_0xb(0x1234, 0, 0)    [syscall by num]\"\n\n\nbadbytes_help = str_main(str_bold(\"\\n\\t'badbytes' Command\"))\nbadbytes_help += str_special(\"\\n\\t(Set bad bytes to avoid in ropchains)\")\nbadbytes_help += \"\\n\\n\\t\"+str_bold(\"Usage\")+\":\\tbadbytes set BYTE [BYTE ...]\"+\"\\n\\t\\tbadbytes reset\"\nbadbytes_help += \"\\n\\n\\t\"+str_bold(\"Example\")+\": badbytes set 0 0xa 0xb 255\"\n\nkeepregs_help = str_main(str_bold(\"\\n\\t'keepregs' Command\"))\nkeepregs_help += str_special(\"\\n\\t(Set registers that must not be clobbered)\")\nkeepregs_help += \"\\n\\n\\t\"+str_bold(\"Usage\")+\":\\tkeepregs set REG [REG ...]\"+\"\\n\\t\\tkeepregs reset\"\nkeepregs_help += \"\\n\\n\\t\"+str_bold(\"Example\")+\": keepregs set rsi rbp\"\n\nsafemem_help = str_main(str_bold(\"\\n\\t'safemem' Command\"))\nsafemem_help += str_special(\"\\n\\t(Enable/Disable the use of gadgets that dereference\" +\n\"\\n\\tregisters holding unknown values and thus might\"+ \"\\n\\tcause a crash)\")\nsafemem_help += \"\\n\\n\\t\"+str_bold(\"Usage\")+\":\\tsafemem set  (disable unsafe gadgets)\"+\"\\n\\t\\tsafemem unset (enable unsafe gadgets)\"\n\nabi_help = str_main(str_bold(\"\\n\\t'abi' Command\"))\nabi_help += 
str_special(\"\\n\\t(Set ABI to use when calling functions)\")\nabi_help += \"\\n\\n\\t\"+str_bold(\"Usage\")+\":\\tabi set <ABI>\"\nabi_help += \"\\n\\n\\t\"+str_bold(\"Supported ABIs\")+\": \"+', '.join([str_special(s) for s in str_to_abi])\n\nos_help = str_main(str_bold(\"\\n\\t'os' Command\"))\nos_help += str_special(\"\\n\\t(Set OS to target when doing syscalls)\")\nos_help += \"\\n\\n\\t\"+str_bold(\"Usage\")+\":\\tos set <operating system>\"\nos_help += \"\\n\\n\\t\"+str_bold(\"Supported systems\")+\": \"+', '.join([str_special(s) for s in str_to_os])\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "libropium/arch/arch.cpp",
    "content": "#include \"arch.hpp\"\n#include <iostream>\n\n\nArch::Arch(ArchType _type, int _bits, int _octets, int _nb, CPUMode _mode, Disassembler* _disasm): \n    type(_type), bits(_bits), octets(_octets), nb_regs(_nb), mode(_mode), disasm(_disasm){}\n    \nArch::~Arch(){\n    delete disasm;\n    disasm = nullptr;\n}\n"
  },
  {
    "path": "libropium/arch/archX86.cpp",
    "content": "#include \"expression.hpp\"\n#include \"arch.hpp\"\n#include \"disassembler.hpp\"\n#include \"exception.hpp\"\n#include \"ir.hpp\"\n#include <cstring>\n#include <sstream>\n#include <capstone/capstone.h>\n#include <capstone/x86.h>\n#include <iostream>\n\nusing std::stringstream;\n\n/* =================================== \n *             ArchX86 \n * ================================== */\nArchX86::ArchX86(): Arch(ArchType::X86, 32, 4, X86_NB_REGS, CPUMode::X86, new DisassemblerX86(CPUMode::X86)){\n}\n\nstring ArchX86::reg_name(reg_t num){\n    switch(num){\n        case X86_EAX: return \"eax\";\n        case X86_EBX: return \"ebx\";\n        case X86_ECX: return \"ecx\";\n        case X86_EDX: return \"edx\";\n        case X86_EDI: return \"edi\";\n        case X86_ESI: return \"esi\";\n        case X86_EBP: return \"ebp\";\n        case X86_ESP: return \"esp\";\n        case X86_EIP: return \"eip\";\n        case X86_CS: return \"cs\";\n        case X86_DS: return \"ds\";\n        case X86_ES: return \"es\";\n        case X86_FS: return \"fs\";\n        case X86_GS: return \"gs\";\n        case X86_SS: return \"ss\";\n        case X86_CF: return \"cf\";\n        case X86_PF: return \"pf\";\n        case X86_AF: return \"af\";\n        case X86_ZF: return \"zf\";\n        case X86_SF: return \"sf\";\n        case X86_TF: return \"tf\";\n        case X86_IF: return \"if\";\n        case X86_DF: return \"df\";\n        case X86_OF: return \"of\";\n        case X86_IOPL: return \"iopl\";\n        case X86_VM: return \"vm\";\n        case X86_NT: return \"nt\";\n        case X86_RF: return \"rf\";\n        case X86_AC: return \"ac\";\n        case X86_VIP: return \"vip\";\n        case X86_VIF: return \"vif\";\n        case X86_ID: return \"id\";\n        case X86_TSC: return \"tsc\";\n        default:\n            throw runtime_exception(\"ArchX86::reg_name() got unknown reg num\");\n    }\n    \n}\nreg_t ArchX86::reg_num(string name){\n    if( 
!name.compare(\"eax\")) return X86_EAX;\n    else if( !name.compare(\"ebx\")) return X86_EBX;\n    else if( !name.compare(\"ecx\")) return X86_ECX;\n    else if( !name.compare(\"edx\")) return X86_EDX;\n    else if( !name.compare(\"edi\")) return X86_EDI;\n    else if( !name.compare(\"esi\")) return X86_ESI;\n    else if( !name.compare(\"ebp\")) return X86_EBP;\n    else if( !name.compare(\"esp\")) return X86_ESP;\n    else if( !name.compare(\"eip\")) return X86_EIP;\n    else if( !name.compare(\"cs\")) return X86_CS;\n    else if( !name.compare(\"ds\")) return X86_DS;\n    else if( !name.compare(\"es\")) return X86_ES;\n    else if( !name.compare(\"fs\")) return X86_FS;\n    else if( !name.compare(\"gs\")) return X86_GS;\n    else if( !name.compare(\"ss\")) return X86_SS;\n    else if( !name.compare(\"cf\")) return X86_CF;\n    else if( !name.compare(\"pf\")) return X86_PF;\n    else if( !name.compare(\"af\")) return X86_AF;\n    else if( !name.compare(\"zf\")) return X86_ZF;\n    else if( !name.compare(\"sf\")) return X86_SF;\n    else if( !name.compare(\"tf\")) return X86_TF;\n    else if( !name.compare(\"if\")) return X86_IF;\n    else if( !name.compare(\"df\")) return X86_DF;\n    else if( !name.compare(\"of\")) return X86_OF;\n    else if( !name.compare(\"iopl\")) return X86_IOPL;\n    else if( !name.compare(\"vm\")) return X86_VM;\n    else if( !name.compare(\"nt\")) return X86_NT;\n    else if( !name.compare(\"rf\")) return X86_RF;\n    else if( !name.compare(\"ac\")) return X86_AC;\n    else if( !name.compare(\"vip\")) return X86_VIP;\n    else if( !name.compare(\"vif\")) return X86_VIF;\n    else if( !name.compare(\"id\")) return X86_ID;\n    else if( !name.compare(\"tsc\")) return X86_TSC;\n    else throw runtime_exception(QuickFmt () << \"ArchX86::reg_num() got unknown reg name: \" << name >> QuickFmt::to_str);\n}\n\nbool ArchX86::is_valid_reg(string& name){\n    return ( !name.compare(\"eax\"))\n        || (!name.compare(\"ebx\")) \n        || 
(!name.compare(\"ecx\")) \n        || (!name.compare(\"edx\")) \n        || (!name.compare(\"edi\")) \n        || (!name.compare(\"esi\"))\n        || (!name.compare(\"ebp\"))\n        || (!name.compare(\"esp\"))\n        || (!name.compare(\"eip\"))\n        || (!name.compare(\"cs\"))\n        || (!name.compare(\"ds\"))\n        || (!name.compare(\"es\"))\n        || (!name.compare(\"fs\"))\n        || (!name.compare(\"gs\")) \n        || (!name.compare(\"ss\")) \n        || (!name.compare(\"cf\")) \n        || (!name.compare(\"pf\")) \n        || (!name.compare(\"af\")) \n        || (!name.compare(\"zf\")) \n        || (!name.compare(\"sf\")) \n        || (!name.compare(\"tf\")) \n        || (!name.compare(\"if\")) \n        || (!name.compare(\"df\")) \n        || (!name.compare(\"of\")) \n        || (!name.compare(\"iopl\")) \n        || (!name.compare(\"vm\")) \n        || (!name.compare(\"nt\")) \n        || (!name.compare(\"rf\")) \n        || (!name.compare(\"ac\")) \n        || (!name.compare(\"vip\")) \n        || (!name.compare(\"vif\")) \n        || (!name.compare(\"id\"))\n        || (!name.compare(\"tsc\"));\n}\n\nreg_t ArchX86::sp(){\n    return X86_ESP;\n}\n\nreg_t ArchX86::pc(){\n    return X86_EIP;\n}\n\nreg_t ArchX86::tsc(){\n    return X86_TSC;\n}\n\n/* =================================== \n *             ArchX64 \n * ================================== */\n \nArchX64::ArchX64(): Arch(ArchType::X64, 64, 8, X64_NB_REGS, CPUMode::X64, new DisassemblerX86(CPUMode::X64)){\n}\n\nstring ArchX64::reg_name(reg_t num){\n    switch(num){\n        case X64_RAX: return \"rax\";\n        case X64_RBX: return \"rbx\";\n        case X64_RCX: return \"rcx\";\n        case X64_RDX: return \"rdx\";\n        case X64_RDI: return \"rdi\";\n        case X64_RSI: return \"rsi\";\n        case X64_RBP: return \"rbp\";\n        case X64_RSP: return \"rsp\";\n        case X64_RIP: return \"rip\";\n        case X64_R8: return \"r8\";\n        case X64_R9: return \"r9\";\n   
     case X64_R10: return \"r10\";\n        case X64_R11: return \"r11\";\n        case X64_R12: return \"r12\";\n        case X64_R13: return \"r13\";\n        case X64_R14: return \"r14\";\n        case X64_R15: return \"r15\";\n        case X64_CS: return \"cs\";\n        case X64_DS: return \"ds\";\n        case X64_ES: return \"es\";\n        case X64_FS: return \"fs\";\n        case X64_GS: return \"gs\";\n        case X64_SS: return \"ss\";\n        case X64_CF: return \"cf\";\n        case X64_PF: return \"pf\";\n        case X64_AF: return \"af\";\n        case X64_ZF: return \"zf\";\n        case X64_SF: return \"sf\";\n        case X64_TF: return \"tf\";\n        case X64_IF: return \"if\";\n        case X64_DF: return \"df\";\n        case X64_OF: return \"of\";\n        case X64_IOPL: return \"iopl\";\n        case X64_VM: return \"vm\";\n        case X64_NT: return \"nt\";\n        case X64_RF: return \"rf\";\n        case X64_AC: return \"ac\";\n        case X64_VIP: return \"vip\";\n        case X64_VIF: return \"vif\";\n        case X64_ID: return \"id\";\n        case X64_TSC: return \"tsc\";\n        default:\n            throw runtime_exception(\"ArchX64::reg_name() got unknown reg num\");\n    }\n    \n}\nreg_t ArchX64::reg_num(string name){\n    if( !name.compare(\"rax\")) return X64_RAX;\n    else if( !name.compare(\"rbx\")) return X64_RBX;\n    else if( !name.compare(\"rcx\")) return X64_RCX;\n    else if( !name.compare(\"rdx\")) return X64_RDX;\n    else if( !name.compare(\"rdi\")) return X64_RDI;\n    else if( !name.compare(\"rsi\")) return X64_RSI;\n    else if( !name.compare(\"rbp\")) return X64_RBP;\n    else if( !name.compare(\"rsp\")) return X64_RSP;\n    else if( !name.compare(\"rip\")) return X64_RIP;\n    else if( !name.compare(\"r8\")) return X64_R8;\n    else if( !name.compare(\"r9\")) return X64_R9;\n    else if( !name.compare(\"r10\")) return X64_R10;\n    else if( !name.compare(\"r11\")) return X64_R11;\n    else if( 
!name.compare(\"r12\")) return X64_R12;\n    else if( !name.compare(\"r13\")) return X64_R13;\n    else if( !name.compare(\"r14\")) return X64_R14;\n    else if( !name.compare(\"r15\")) return X64_R15;\n    else if( !name.compare(\"cs\")) return X64_CS;\n    else if( !name.compare(\"ds\")) return X64_DS;\n    else if( !name.compare(\"es\")) return X64_ES;\n    else if( !name.compare(\"fs\")) return X64_FS;\n    else if( !name.compare(\"gs\")) return X64_GS;\n    else if( !name.compare(\"ss\")) return X64_SS;\n    else if( !name.compare(\"cf\")) return X64_CF;\n    else if( !name.compare(\"pf\")) return X64_PF;\n    else if( !name.compare(\"af\")) return X64_AF;\n    else if( !name.compare(\"zf\")) return X64_ZF;\n    else if( !name.compare(\"sf\")) return X64_SF;\n    else if( !name.compare(\"tf\")) return X64_TF;\n    else if( !name.compare(\"if\")) return X64_IF;\n    else if( !name.compare(\"df\")) return X64_DF;\n    else if( !name.compare(\"of\")) return X64_OF;\n    else if( !name.compare(\"iopl\")) return X64_IOPL;\n    else if( !name.compare(\"vm\")) return X64_VM;\n    else if( !name.compare(\"nt\")) return X64_NT;\n    else if( !name.compare(\"rf\")) return X64_RF;\n    else if( !name.compare(\"ac\")) return X64_AC;\n    else if( !name.compare(\"vip\")) return X64_VIP;\n    else if( !name.compare(\"vif\")) return X64_VIF;\n    else if( !name.compare(\"id\")) return X64_ID;\n    else if( !name.compare(\"tsc\")) return X64_TSC;\n    else throw runtime_exception(QuickFmt () << \"ArchX64::reg_num() got unknown reg name: \" << name >> QuickFmt::to_str);\n}\n\nbool ArchX64::is_valid_reg(string& name){\n    return (!name.compare(\"rax\")) \n        || (!name.compare(\"rbx\")) \n        || (!name.compare(\"rcx\")) \n        || (!name.compare(\"rdx\")) \n        || (!name.compare(\"rdi\")) \n        || (!name.compare(\"rsi\")) \n        || (!name.compare(\"rbp\")) \n        || (!name.compare(\"rsp\")) \n        || (!name.compare(\"rip\")) \n        || 
(!name.compare(\"r8\")) \n        || (!name.compare(\"r9\")) \n        || (!name.compare(\"r10\")) \n        || (!name.compare(\"r11\")) \n        || (!name.compare(\"r12\")) \n        || (!name.compare(\"r13\")) \n        || (!name.compare(\"r14\")) \n        || (!name.compare(\"r15\")) \n        || (!name.compare(\"cs\")) \n        || (!name.compare(\"ds\")) \n        || (!name.compare(\"es\")) \n        || (!name.compare(\"fs\")) \n        || (!name.compare(\"gs\")) \n        || (!name.compare(\"ss\")) \n        || (!name.compare(\"cf\")) \n        || (!name.compare(\"pf\")) \n        || (!name.compare(\"af\")) \n        || (!name.compare(\"zf\")) \n        || (!name.compare(\"sf\")) \n        || (!name.compare(\"tf\")) \n        || (!name.compare(\"if\")) \n        || (!name.compare(\"df\")) \n        || (!name.compare(\"of\")) \n        || (!name.compare(\"iopl\")) \n        || (!name.compare(\"vm\")) \n        || (!name.compare(\"nt\")) \n        || (!name.compare(\"rf\")) \n        || (!name.compare(\"ac\")) \n        || (!name.compare(\"vip\")) \n        || (!name.compare(\"vif\")) \n        || (!name.compare(\"id\")) \n        || (!name.compare(\"tsc\"));\n}\n\nreg_t ArchX64::sp(){\n    return X64_RSP;\n}\n\nreg_t ArchX64::pc(){\n    return X64_RIP;\n}\n\nreg_t ArchX64::tsc(){\n    return X64_TSC;\n}\n\n/* =================================== \n *        X86 & X64 Disassembler\n * ================================== */\nDisassemblerX86::DisassemblerX86(CPUMode mode){\n    _mode = mode;\n    if( mode == CPUMode::X86 ){\n        cs_open(CS_ARCH_X86, CS_MODE_32, &_handle);\n    }else if( mode == CPUMode::X64 ){\n        cs_open(CS_ARCH_X86, CS_MODE_64, &_handle);\n    }else{\n        throw runtime_exception(\"DisassemblerX86: got unsupported mode\");\n    }\n    // Ask for detailed instructions\n    cs_option(_handle, CS_OPT_DETAIL, CS_OPT_ON);\n    // allocate memory cache for 1 instruction, to be used by cs_disasm_iter later.\n    // (will be freed in 
destructor)\n    _insn = cs_malloc(_handle);\n}\n\ninline IROperand x86_32_reg_translate(x86_reg reg){\n    switch(reg){\n        case X86_REG_AL: return IROperand(IROperandType::VAR, X86_EAX, 7, 0);\n        case X86_REG_AH: return IROperand(IROperandType::VAR, X86_EAX, 15, 8);\n        case X86_REG_AX: return IROperand(IROperandType::VAR, X86_EAX, 15, 0);\n        case X86_REG_EAX: return IROperand(IROperandType::VAR, X86_EAX, 31, 0);\n        case X86_REG_BL: return IROperand(IROperandType::VAR, X86_EBX, 7, 0);\n        case X86_REG_BH: return IROperand(IROperandType::VAR, X86_EBX, 15, 8);\n        case X86_REG_BX: return IROperand(IROperandType::VAR, X86_EBX, 15, 0);\n        case X86_REG_EBX: return IROperand(IROperandType::VAR, X86_EBX , 31, 0);\n        case X86_REG_CL: return IROperand(IROperandType::VAR, X86_ECX, 7, 0);\n        case X86_REG_CH: return IROperand(IROperandType::VAR, X86_ECX, 15, 8);\n        case X86_REG_CX: return IROperand(IROperandType::VAR, X86_ECX, 15, 0);\n        case X86_REG_ECX: return IROperand(IROperandType::VAR, X86_ECX, 31, 0);\n        case X86_REG_DL: return IROperand(IROperandType::VAR, X86_EDX, 7, 0);\n        case X86_REG_DH: return IROperand(IROperandType::VAR, X86_EDX, 15, 8);\n        case X86_REG_DX: return IROperand(IROperandType::VAR, X86_EDX, 15, 0);\n        case X86_REG_EDX: return IROperand(IROperandType::VAR, X86_EDX, 31, 0);\n        case X86_REG_DI: return IROperand(IROperandType::VAR, X86_EDI, 15, 0);\n        case X86_REG_EDI: return IROperand(IROperandType::VAR, X86_EDI, 31, 0);\n        case X86_REG_SI: return IROperand(IROperandType::VAR, X86_ESI, 15, 0);\n        case X86_REG_ESI: return IROperand(IROperandType::VAR, X86_ESI, 31, 0);\n        case X86_REG_BP: return IROperand(IROperandType::VAR, X86_EBP, 15, 0);\n        case X86_REG_EBP: return IROperand(IROperandType::VAR, X86_EBP, 31, 0);\n        case X86_REG_SP: return IROperand(IROperandType::VAR, X86_ESP, 15, 0);\n        case X86_REG_ESP: return 
IROperand(IROperandType::VAR, X86_ESP, 31, 0);\n        case X86_REG_IP: return IROperand(IROperandType::VAR, X86_EIP, 15, 0);\n        case X86_REG_EIP: return IROperand(IROperandType::VAR, X86_EIP, 31, 0);\n        case X86_REG_CS: return IROperand(IROperandType::VAR, X86_CS, 31, 0);\n        case X86_REG_DS: return IROperand(IROperandType::VAR, X86_DS, 31, 0);\n        case X86_REG_ES: return IROperand(IROperandType::VAR, X86_ES, 31, 0);\n        case X86_REG_GS: return IROperand(IROperandType::VAR, X86_GS, 31, 0);\n        case X86_REG_FS: return IROperand(IROperandType::VAR, X86_FS, 31, 0);\n        case X86_REG_SS: return IROperand(IROperandType::VAR, X86_SS, 31, 0);\n        default: throw runtime_exception( QuickFmt() <<\n        \"Disassembler X86: unknown capstone register \" << reg \n        >> QuickFmt::to_str);\n    }\n}\n\ninline IROperand x86_64_reg_translate(x86_reg reg){\n    switch(reg){\n        case X86_REG_AL: return IROperand(IROperandType::VAR, X64_RAX, 7, 0);\n        case X86_REG_AH: return IROperand(IROperandType::VAR, X64_RAX, 15, 8);\n        case X86_REG_AX: return IROperand(IROperandType::VAR, X64_RAX, 15, 0);\n        case X86_REG_EAX: return IROperand(IROperandType::VAR, X64_RAX, 31, 0);\n        case X86_REG_RAX: return IROperand(IROperandType::VAR, X64_RAX, 63, 0);\n        case X86_REG_BL: return IROperand(IROperandType::VAR, X64_RBX, 7, 0);\n        case X86_REG_BH: return IROperand(IROperandType::VAR, X64_RBX, 15, 8);\n        case X86_REG_BX: return IROperand(IROperandType::VAR, X64_RBX, 15, 0);\n        case X86_REG_EBX: return IROperand(IROperandType::VAR, X64_RBX , 31, 0);\n        case X86_REG_RBX: return IROperand(IROperandType::VAR, X64_RBX , 63, 0);\n        case X86_REG_CL: return IROperand(IROperandType::VAR, X64_RCX, 7, 0);\n        case X86_REG_CH: return IROperand(IROperandType::VAR, X64_RCX, 15, 8);\n        case X86_REG_CX: return IROperand(IROperandType::VAR, X64_RCX, 15, 0);\n        case X86_REG_ECX: return 
IROperand(IROperandType::VAR, X64_RCX, 31, 0);\n        case X86_REG_RCX: return IROperand(IROperandType::VAR, X64_RCX, 63, 0);\n        case X86_REG_DL: return IROperand(IROperandType::VAR, X64_RDX, 7, 0);\n        case X86_REG_DH: return IROperand(IROperandType::VAR, X64_RDX, 15, 8);\n        case X86_REG_DX: return IROperand(IROperandType::VAR, X64_RDX, 15, 0);\n        case X86_REG_EDX: return IROperand(IROperandType::VAR, X64_RDX, 31, 0);\n        case X86_REG_RDX: return IROperand(IROperandType::VAR, X64_RDX, 63, 0);\n        case X86_REG_DI: return IROperand(IROperandType::VAR, X64_RDI, 15, 0);\n        case X86_REG_EDI: return IROperand(IROperandType::VAR, X64_RDI, 31, 0);\n        case X86_REG_RDI: return IROperand(IROperandType::VAR, X64_RDI, 63, 0);\n        case X86_REG_SI: return IROperand(IROperandType::VAR, X64_RSI, 15, 0);\n        case X86_REG_ESI: return IROperand(IROperandType::VAR, X64_RSI, 31, 0);\n        case X86_REG_RSI: return IROperand(IROperandType::VAR, X64_RSI, 63, 0);\n        case X86_REG_BP: return IROperand(IROperandType::VAR, X64_RBP, 15, 0);\n        case X86_REG_EBP: return IROperand(IROperandType::VAR, X64_RBP, 31, 0);\n        case X86_REG_RBP: return IROperand(IROperandType::VAR, X64_RBP, 63, 0);\n        case X86_REG_SP: return IROperand(IROperandType::VAR, X64_RSP, 15, 0);\n        case X86_REG_ESP: return IROperand(IROperandType::VAR, X64_RSP, 31, 0);\n        case X86_REG_RSP: return IROperand(IROperandType::VAR, X64_RSP, 63, 0);\n        case X86_REG_IP: return IROperand(IROperandType::VAR, X64_RIP, 15, 0);\n        case X86_REG_EIP: return IROperand(IROperandType::VAR, X64_RIP, 31, 0);\n        case X86_REG_RIP: return IROperand(IROperandType::VAR, X64_RIP, 63, 0);\n        case X86_REG_R8: return IROperand(IROperandType::VAR, X64_R8, 63, 0);\n        case X86_REG_R8B: return IROperand(IROperandType::VAR, X64_R8, 7, 0);\n        case X86_REG_R8D: return IROperand(IROperandType::VAR, X64_R8, 31, 0);\n        case 
X86_REG_R8W: return IROperand(IROperandType::VAR, X64_R8, 15, 0);\n        case X86_REG_R9: return IROperand(IROperandType::VAR, X64_R9, 63, 0);\n        case X86_REG_R9B: return IROperand(IROperandType::VAR, X64_R9, 7, 0);\n        case X86_REG_R9D: return IROperand(IROperandType::VAR, X64_R9, 31, 0);\n        case X86_REG_R9W: return IROperand(IROperandType::VAR, X64_R9, 15, 0);\n        case X86_REG_R10: return IROperand(IROperandType::VAR, X64_R10, 63, 0);\n        case X86_REG_R10B: return IROperand(IROperandType::VAR, X64_R10, 7, 0);\n        case X86_REG_R10D: return IROperand(IROperandType::VAR, X64_R10, 31, 0);\n        case X86_REG_R10W: return IROperand(IROperandType::VAR, X64_R10, 15, 0);\n        case X86_REG_R11: return IROperand(IROperandType::VAR, X64_R11, 63, 0);\n        case X86_REG_R11B: return IROperand(IROperandType::VAR, X64_R11, 7, 0);\n        case X86_REG_R11D: return IROperand(IROperandType::VAR, X64_R11, 31, 0);\n        case X86_REG_R11W: return IROperand(IROperandType::VAR, X64_R11, 15, 0);\n        case X86_REG_R12: return IROperand(IROperandType::VAR, X64_R12, 63, 0);\n        case X86_REG_R12B: return IROperand(IROperandType::VAR, X64_R12, 7, 0);\n        case X86_REG_R12D: return IROperand(IROperandType::VAR, X64_R12, 31, 0);\n        case X86_REG_R12W: return IROperand(IROperandType::VAR, X64_R12, 15, 0);\n        case X86_REG_R13: return IROperand(IROperandType::VAR, X64_R13, 63, 0);\n        case X86_REG_R13B: return IROperand(IROperandType::VAR, X64_R13, 7, 0);\n        case X86_REG_R13D: return IROperand(IROperandType::VAR, X64_R13, 31, 0);\n        case X86_REG_R13W: return IROperand(IROperandType::VAR, X64_R13, 15, 0);\n        case X86_REG_R14: return IROperand(IROperandType::VAR, X64_R14, 63, 0);\n        case X86_REG_R14B: return IROperand(IROperandType::VAR, X64_R14, 7, 0);\n        case X86_REG_R14D: return IROperand(IROperandType::VAR, X64_R14, 31, 0);\n        case X86_REG_R14W: return IROperand(IROperandType::VAR, 
X64_R14, 15, 0);\n        case X86_REG_R15: return IROperand(IROperandType::VAR, X64_R15, 63, 0);\n        case X86_REG_R15B: return IROperand(IROperandType::VAR, X64_R15, 7, 0);\n        case X86_REG_R15D: return IROperand(IROperandType::VAR, X64_R15, 31, 0);\n        case X86_REG_R15W: return IROperand(IROperandType::VAR, X64_R15, 15, 0);\n        case X86_REG_CS: return IROperand(IROperandType::VAR, X64_CS, 63, 0);\n        case X86_REG_DS: return IROperand(IROperandType::VAR, X64_DS, 63, 0);\n        case X86_REG_ES: return IROperand(IROperandType::VAR, X64_ES, 63, 0);\n        case X86_REG_GS: return IROperand(IROperandType::VAR, X64_GS, 63, 0);\n        case X86_REG_FS: return IROperand(IROperandType::VAR, X64_FS, 63, 0);\n        case X86_REG_SS: return IROperand(IROperandType::VAR, X64_SS, 63, 0);\n        default: throw runtime_exception( QuickFmt() <<\n        \"Disassembler X86: unknown capstone register \" << reg \n        >> QuickFmt::to_str);\n    }\n}\n\ninline IROperand x86_reg_translate(CPUMode mode, x86_reg reg){\n    if( mode == CPUMode::X86 ){\n        return x86_32_reg_translate(reg);\n    }else{\n        return x86_64_reg_translate(reg);\n    }\n}\n\ninline IROperand x86_arg_extract(IROperand& arg, exprsize_t high, exprsize_t low){\n    switch(arg.type){\n        case IROperandType::CST: return IROperand(IROperandType::CST, arg.cst(), high, low);\n        case IROperandType::VAR: return IROperand(IROperandType::VAR, arg.var(), high, low);\n        case IROperandType::TMP: return IROperand(IROperandType::TMP, arg.tmp(), high, low);\n        case IROperandType::NONE: return IROperand();\n        default: throw runtime_exception(\"x86_arg_extract(): got unknown IROperandType!\");\n    }\n}\n\n/* Translate capstone argument to IR argument \n * Arguments:\n *      mode - the current CPU mode for registers translation \n *      addr - the address of the instruction being translated\n *      arg - the capstone operand \n *      block/bblkid - block 
and basicblockid where to add instructions if needed \n *      tmp_var_count - the counter of the tmp variables used in the current IRBlock\n *      load_mem - if TRUE then load memory operands (dereference), else only return the operand (pointer) \n */\ninline IROperand x86_arg_translate(CPUMode mode, addr_t addr, cs_x86_op* arg, IRBlock* block, IRBasicBlockId bblkid, int& tmp_vars_count, bool load_mem=false){\n    IROperand base, index, res, disp, segment;\n    exprsize_t size = arg->size*8, addr_size = 0, reg_size = (mode==CPUMode::X86)? 32:64;\n    try{\n        switch(arg->type){\n            /* Register */\n            case X86_OP_REG:\n                return x86_reg_translate(mode, arg->reg);\n            /* Immediate */\n            case X86_OP_IMM:\n                return IROperand(IROperandType::CST, arg->imm, size-1, 0);\n            /* Memory */\n            case X86_OP_MEM:\n                // Arg = segment + base + (index*scale) + disp\n                // Get index*scale\n                if( arg->mem.index != X86_OP_INVALID ){\n                    index = x86_reg_translate(mode, (x86_reg)arg->mem.index);\n                    if( arg->mem.scale != 1 ){\n                        block->add_instr(bblkid, IRInstruction(IROperation::MUL, ir_tmp(tmp_vars_count++, index.size-1, 0), \n                            ir_cst(arg->mem.scale, index.size-1, 0), index, addr));\n                        index = ir_tmp(tmp_vars_count-1, index.size-1, 0);\n                    }\n                    addr_size = index.size;\n                }\n                // Get base\n                if( arg->mem.base != X86_OP_INVALID ){\n                    base = x86_reg_translate(mode, (x86_reg)arg->mem.base);\n                    // If too small adjust\n                    if( base.size < index.size ){\n                        block->add_instr(bblkid, ir_mov(ir_tmp(tmp_vars_count++, index.size-1, 0), ir_cst(0, index.size-1, 0), addr));\n                        
block->add_instr(bblkid, ir_mov(ir_tmp(tmp_vars_count-1, base.size-1, 0), base, addr));\n                        base = ir_tmp(tmp_vars_count-1, base.size-1, 0);\n                    }\n                    addr_size = base.size;\n                }else{\n                    //base = ir_cst(0, index.size-1, 0);\n                    base = ir_none();\n                    //throw runtime_exception(\"Disassembler X86: didn't expect X86_OP_INVALID base for mem operand in capstone\");\n                }\n                \n                // Get displacement\n                if( addr_size == 0 )\n                    addr_size = reg_size;\n                if( arg->mem.disp != 0 ){\n                    disp = IROperand(IROperandType::CST, arg->mem.disp, addr_size-1, 0);\n                }else{\n                    disp = ir_none();\n                }\n\n                // Get segment selector (here we consider that the segment selector symbolic register holds the address\n                // of the segment, not the index in the GDT\n                if( arg->mem.segment != X86_OP_INVALID ){\n                    segment = x86_reg_translate(mode, (x86_reg)arg->mem.segment);\n                    // If too big, adjust\n                    if( segment.size > addr_size ){\n                        block->add_instr(bblkid, ir_mov(ir_tmp(tmp_vars_count++, addr_size-1, 0), x86_arg_extract(segment, addr_size-1, 0), addr));\n                        segment = ir_tmp(tmp_vars_count-1, addr_size-1, 0);\n                    }\n                }else{\n                    segment = ir_none();\n                }\n                \n                // === Build the operand now ===\n                // Add base and index if any \n                if( !index.is_none() ){\n                    if( !base.is_none() ){\n                        block->add_instr(bblkid, ir_add(ir_tmp(tmp_vars_count++, index.size-1, 0), base, index, addr));\n                        res = IROperand(IROperandType::TMP, 
tmp_vars_count-1, index.size-1, 0);\n                    }else{\n                        res = index;\n                    }\n                }else if(!base.is_none()){\n                    res = base;\n                }else{\n                    res = ir_none();\n                }\n                // Add displacement if any \n                if( !disp.is_none() ){\n                    if( !res.is_none()){\n                        block->add_instr(bblkid, ir_add( ir_tmp(tmp_vars_count++, res.size-1, 0), disp, res, addr));\n                        res = IROperand(IROperandType::TMP, tmp_vars_count-1, res.size-1, 0);\n                    }else{\n                        res = disp;\n                    }\n                }\n                // Add segment if any\n                if( !segment.is_none() ){\n                    if( !res.is_none() ){\n                        block->add_instr(bblkid, ir_add( ir_tmp(tmp_vars_count++, res.size-1, 0), segment, res, addr));\n                        res = IROperand(IROperandType::TMP, tmp_vars_count-1, res.size-1, 0);\n                    }else{\n                        res = segment;\n                    }\n                }\n\n                // Check res\n                if( res.is_none() ){\n                    throw symbolic_exception(\"Got IR_NONE memory operand\");\n                }\n                \n                // Do load memory if requested\n                if( load_mem ){\n                    block->add_instr(bblkid, IRInstruction(IROperation::LDM,\n                        IROperand(IROperandType::TMP, tmp_vars_count++, size-1 , 0), res, addr));\n                    res = IROperand(IROperandType::TMP, tmp_vars_count-1, size-1, 0);\n                }\n                return res;\n            default:\n                throw runtime_exception(QuickFmt() << \"Disassembler X86: at addr: 0x\" << std::hex\n                    << addr << \" :got unknown capstone operand type\" >> QuickFmt::to_str);\n        }\n    
}catch(runtime_exception& e){\n        throw runtime_exception(QuickFmt() << \"Disassembler X86: error at addr: 0x\" << std::hex\n                    << addr << \" :\" + string(e.what()) >> QuickFmt::to_str);\n    }\n    throw runtime_exception(QuickFmt() << \"Disassembler X86: at addr: 0x\" << std::hex << addr << \" :couldn't translate operand\"\n        >> QuickFmt::to_str);\n}\n\n/* Translate a 32bits assignment to a 64bits assignment where the upper\n * 32 bits are cleared. For the bits to be cleared, the following conditions\n * must be fulfilled:\n *  - mode is CPUMode::X64\n *  - destination is a register (IROperandType::VAR) \n *  - destination is 32 bits */\ninline void x86_adjust_reg_assign(CPUMode mode, addr_t addr, IRBlock* block, IRBasicBlockId bblkid, int& tmp_vars_count, IROperand dest, IROperand val){\n    IROperand res;\n    if( mode != CPUMode::X64 || dest.size != 32 || dest.type != IROperandType::VAR){\n        // No need to adjust, so assign result to destination\n        block->add_instr(bblkid, ir_mov(dest, val, addr));\n        return;\n    }\n    // Need to clear upper bits\n    block->add_instr(bblkid, ir_concat(ir_var(dest.var(), 63, 0), ir_cst(0, 63-val.size, 0), val, addr));\n}\n\n/* ========================================= */\ninline IROperand x86_get_pc(CPUMode mode ){\n    if( mode == CPUMode::X86 )\n        return ir_var(X86_EIP, 31, 0 );\n    else if( mode == CPUMode::X64 )\n        return ir_var(X64_RIP, 63, 0 );\n    else\n        throw runtime_exception(\"x86_get_pc(): got unknown CPUMode!\");\n}\n\ninline IROperand x86_get_tsc(CPUMode mode ){\n    if( mode == CPUMode::X86 )\n        return ir_var(X86_TSC, 63, 0 );\n    else if( mode == CPUMode::X64 )\n        return ir_var(X64_TSC, 63, 0 );\n    else\n        throw runtime_exception(\"x86_get_tsc(): got unknown CPUMode!\");\n}\n\ninline void x86_set_zf(CPUMode mode, IROperand& arg, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid){\n    if( mode == CPUMode::X86 )\n        
block->add_instr(bblkid, ir_bisz(ir_var(X86_ZF, 31, 0), arg, ir_cst(1, 31, 0), addr));\n    else\n        block->add_instr(bblkid, ir_bisz(ir_var(X64_ZF, 63, 0), arg , ir_cst(1, 63, 0), addr));\n}\n\ninline void x86_add_set_cf(CPUMode mode, IROperand op0, IROperand op1, IROperand res, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid, int& tmp_var_count){\n    IROperand   msb0 = x86_arg_extract(op0, op0.high, op0.high),\n                msb1 = x86_arg_extract(op1, op1.high, op1.high),\n                msb2 = x86_arg_extract(res, res.high, res.high),\n                tmp0 = ir_tmp(tmp_var_count++, 0, 0 ),\n                tmp1 = ir_tmp(tmp_var_count++, 0, 0 ),\n                tmp2 = ir_tmp(tmp_var_count++, 0, 0 );\n    // cf -> higher bits of both operands are already 1 \n    \n    block->add_instr(bblkid, ir_and(tmp0, msb0, msb1, addr));\n    //       or they are 1 and 0 and result has MSB 0\n    block->add_instr(bblkid, ir_xor(tmp1, msb0, msb1, addr));\n    block->add_instr(bblkid, ir_not(tmp2, msb2, addr));\n    block->add_instr(bblkid, ir_and(tmp2, tmp1, tmp2, addr));\n    block->add_instr(bblkid, ir_or(tmp2, tmp0, tmp2, addr)); \n    if( mode == CPUMode::X86 )\n        block->add_instr(bblkid, ir_bisz( ir_var(X86_CF, 31, 0),tmp2, ir_cst(0, 31, 0), addr));\n    else if( mode == CPUMode::X64 )\n        block->add_instr(bblkid, ir_bisz( ir_var(X64_CF, 63, 0),tmp2, ir_cst(0, 63, 0), addr));\n}\n\ninline void x86_add_set_of(CPUMode mode, IROperand op0, IROperand op1, IROperand res, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid, int& tmp_var_count){\n    IROperand   msb0 = x86_arg_extract(op0, op0.high, op0.high),\n                msb1 = x86_arg_extract(op1, op1.high, op1.high),\n                msb2 = x86_arg_extract(res, res.high, res.high),\n                tmp0 = ir_tmp(tmp_var_count++, 0, 0 ),\n                tmp1 = ir_tmp(tmp_var_count++, 0, 0 );     \n    \n    // of -> msb of both operands have the same MSB but result\n    //       has different\n   
 block->add_instr(bblkid, ir_xor(tmp0, msb0, msb1, addr));\n    block->add_instr(bblkid, ir_not(tmp0, tmp0, addr));\n    block->add_instr(bblkid, ir_xor(tmp1, msb0, msb2, addr));\n    block->add_instr(bblkid, ir_and(tmp1, tmp0, tmp1, addr));\n    if( mode == CPUMode::X86 )\n        block->add_instr(bblkid, ir_bisz(ir_var(X86_OF, 31, 0), tmp1, ir_cst(0, 31, 0), addr));\n    else if( mode == CPUMode::X64 )\n        block->add_instr(bblkid, ir_bisz(ir_var(X64_OF, 63, 0), tmp1, ir_cst(0, 63, 0), addr));\n}\n\ninline void x86_sub_set_cf(CPUMode mode, IROperand op0, IROperand op1, IROperand res, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid, int& tmp_var_count){\n    IROperand   msb0 = x86_arg_extract(op0, op0.high, op0.high),\n                msb1 = x86_arg_extract(op1, op1.high, op1.high),\n                msb2 = x86_arg_extract(res, res.high, res.high),\n                tmp0 = ir_tmp(tmp_var_count++, 0, 0 ),\n                tmp1 = ir_tmp(tmp_var_count++, 0, 0 );\n    // cf <- (~msb0&msb1) | (msb1&msb2) | (~msb0&msb2)\n    block->add_instr(bblkid, ir_not(tmp0, msb0, addr));\n    block->add_instr(bblkid, ir_and(tmp0, tmp0, msb1, addr));\n    block->add_instr(bblkid, ir_and(tmp1, msb1, msb2, addr));\n    block->add_instr(bblkid, ir_or(tmp1, tmp0, tmp1, addr));\n    \n    block->add_instr(bblkid, ir_not(tmp0, msb0, addr));\n    block->add_instr(bblkid, ir_and(tmp0, tmp0, msb2, addr));\n    block->add_instr(bblkid, ir_or(tmp1, tmp1, tmp0, addr)); \n    if( mode == CPUMode::X86 )\n        block->add_instr(bblkid, ir_bisz( ir_var(X86_CF, 31, 0),tmp1, ir_cst(0, 31, 0), addr));\n    else if( mode == CPUMode::X64 )\n        block->add_instr(bblkid, ir_bisz( ir_var(X64_CF, 63, 0),tmp1, ir_cst(0, 63, 0), addr));\n}\n\ninline void x86_sub_set_af(CPUMode mode, IROperand op0, IROperand op1, IROperand res, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid, int& tmp_var_count){\n    /* Like cf but for bit 3 */\n    IROperand   msb0 = x86_arg_extract(op0, 3, 3),\n              
  msb1 = x86_arg_extract(op1, 3, 3),\n                msb2 = x86_arg_extract(res, 3, 3),\n                tmp0 = ir_tmp(tmp_var_count++, 0, 0 ),\n                tmp1 = ir_tmp(tmp_var_count++, 0, 0 );\n    // cf <- (~msb0&msb1) | (msb1&msb2) | (~msb0&msb2)\n    block->add_instr(bblkid, ir_not(tmp0, msb0, addr));\n    block->add_instr(bblkid, ir_and(tmp0, tmp0, msb1, addr));\n    block->add_instr(bblkid, ir_and(tmp1, msb1, msb2, addr));\n    block->add_instr(bblkid, ir_or(tmp1, tmp0, tmp1, addr));\n    \n    block->add_instr(bblkid, ir_not(tmp0, msb0, addr));\n    block->add_instr(bblkid, ir_and(tmp0, tmp0, msb2, addr));\n    block->add_instr(bblkid, ir_or(tmp1, tmp1, tmp0, addr)); \n    if( mode == CPUMode::X86 )\n        block->add_instr(bblkid, ir_bisz( ir_var(X86_AF, 31, 0),tmp1, ir_cst(0, 31, 0), addr));\n    else if( mode == CPUMode::X64 )\n        block->add_instr(bblkid, ir_bisz( ir_var(X64_AF, 63, 0),tmp1, ir_cst(0, 63, 0), addr));\n}\n\ninline void x86_sub_set_of(CPUMode mode, IROperand op0, IROperand op1, IROperand res, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid, int& tmp_var_count){\n    IROperand   msb0 = x86_arg_extract(op0, op0.high, op0.high),\n                msb1 = x86_arg_extract(op1, op1.high, op1.high),\n                msb2 = x86_arg_extract(res, res.high, res.high),\n                tmp0 = ir_tmp(tmp_var_count++, 0, 0 ),\n                tmp1 = ir_tmp(tmp_var_count++, 0, 0 );\n    \n    // of -> msb of both operands have different MSB and result\n    //       has the same as second operand\n    block->add_instr(bblkid, ir_xor(tmp0, msb0, msb1, addr));\n    block->add_instr(bblkid, ir_xor(tmp1, msb1, msb2, addr));\n    block->add_instr(bblkid, ir_not(tmp1, tmp1, addr));\n    block->add_instr(bblkid, ir_and(tmp1, tmp0, tmp1, addr));\n    if( mode == CPUMode::X86 )\n        block->add_instr(bblkid, ir_bisz(ir_var(X86_OF, 31, 0), tmp1, ir_cst(0, 31, 0), addr));\n    else if( mode == CPUMode::X64 )\n        block->add_instr(bblkid, 
ir_bisz(ir_var(X64_OF, 63, 0), tmp1, ir_cst(0, 63, 0), addr));\n}\n\ninline void x86_set_sf(CPUMode mode, IROperand& arg, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid){\n    IROperand sf = mode == CPUMode::X86 ? ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0); \n    block->add_instr(bblkid, ir_bisz(sf, x86_arg_extract(arg, arg.high, arg.high), ir_cst(0, sf.high, 0), addr));\n}\n\ninline void x86_add_set_af(CPUMode mode, IROperand op0, IROperand op1, IROperand res, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid, int& tmp_var_count){\n    // Basically like cf but for bits 3\n    IROperand   msb0 = x86_arg_extract(op0, 3, 3),\n                msb1 = x86_arg_extract(op1, 3, 3),\n                msb2 = x86_arg_extract(res, 3, 3),\n                tmp0 = ir_tmp(tmp_var_count++, 0, 0 ),\n                tmp1 = ir_tmp(tmp_var_count++, 0, 0 ),\n                tmp2 = ir_tmp(tmp_var_count++, 0, 0 ),\n                tmp3 = ir_tmp(tmp_var_count++, 0, 0 ),\n                tmp4 = ir_tmp(tmp_var_count++, 0, 0 );\n    // cf -> higher bits of both operands are already 1 \n    \n    block->add_instr(bblkid, ir_and(tmp0, msb0, msb1, addr));\n    //       or they are 1 and 0 and result has MSB 0\n    block->add_instr(bblkid, ir_xor(tmp1, msb0, msb1, addr));\n    block->add_instr(bblkid, ir_not(tmp2, msb2, addr));\n    block->add_instr(bblkid, ir_and(tmp3, tmp1, tmp2, addr));\n    block->add_instr(bblkid, ir_or(tmp4, tmp0, tmp3, addr)); \n    if( mode == CPUMode::X86 )\n        block->add_instr(bblkid, ir_bisz( ir_var(X86_AF, 31, 0),tmp4, ir_cst(0, 31, 0), addr));\n    else if( mode == CPUMode::X64 )\n        block->add_instr(bblkid, ir_bisz( ir_var(X64_AF, 63, 0),tmp4, ir_cst(0, 63, 0), addr));\n}\n\ninline void x86_set_pf(CPUMode mode, IROperand arg, addr_t addr, IRBlock* block, IRBasicBlockId bblkid, int& tmp_var_count){\n    // pf number of bits that are equal to zero in the least significant byte \n    // of the result of an operation -> xor all and set flag if zero \n 
   IROperand tmp =  ir_tmp(tmp_var_count++, 0, 0 );\n    block->add_instr(bblkid, ir_mov(tmp, x86_arg_extract(arg, 0, 0), addr));\n    for( int i = 1; i < 8; i++){\n        block->add_instr(bblkid, ir_xor(tmp, tmp, x86_arg_extract(arg, i, i), addr));\n    }\n    if( mode == CPUMode::X86 ){\n        block->add_instr(bblkid, ir_bisz(ir_var(X86_PF, 31, 0), tmp, ir_cst(1, 31, 0), addr));\n    }else if( mode == CPUMode::X64 ){\n        block->add_instr(bblkid, ir_bisz(ir_var(X64_PF, 63, 0), tmp, ir_cst(1, 63, 0), addr));\n    }\n}\n\n/* =====================\n * Instruction prefixes \n * =====================\n\n*/\n\nIRBasicBlockId _x86_init_prefix(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid){\n    IRBasicBlockId start;\n    if( instr->detail->x86.prefix[0] != X86_PREFIX_REP &&\n        instr->detail->x86.prefix[0] != X86_PREFIX_REPNE ){\n        return -1;\n    }\n    start = block->new_bblock();\n    block->add_instr(bblkid, ir_bcc(ir_cst(1, 31, 0), ir_cst(start, 31, 0), ir_none(), addr));\n    bblkid = block->new_bblock();\n    return start;\n}\n\nbool inline _accepts_repe_prefix(cs_insn* instr){\n    return  instr->id == X86_INS_CMPSB ||\n            instr->id == X86_INS_CMPSW ||\n            instr->id == X86_INS_CMPSD ||\n            instr->id == X86_INS_CMPSQ ||\n            instr->id == X86_INS_SCASB ||\n            instr->id == X86_INS_SCASW ||\n            instr->id == X86_INS_SCASD ||\n            instr->id == X86_INS_SCASQ;\n}   \n\nbool inline _accepts_rep_prefix(cs_insn* instr){\n    return  instr->id == X86_INS_INSB ||\n            instr->id == X86_INS_INSW ||\n            instr->id == X86_INS_INSD ||\n            instr->id == X86_INS_MOVSB ||\n            instr->id == X86_INS_MOVSW ||\n            instr->id == X86_INS_MOVSD ||\n            instr->id == X86_INS_MOVSQ ||\n            instr->id == X86_INS_OUTSB ||\n            instr->id == X86_INS_OUTSW ||\n            instr->id == X86_INS_OUTSD ||\n            
instr->id == X86_INS_LODSB ||\n            instr->id == X86_INS_LODSW ||\n            instr->id == X86_INS_LODSD ||\n            instr->id == X86_INS_LODSQ ||\n            instr->id == X86_INS_STOSB ||\n            instr->id == X86_INS_STOSW ||\n            instr->id == X86_INS_STOSD ||\n            instr->id == X86_INS_STOSQ;\n}\n\n/* Wraps an instruction block with a REP prefix\n * Parameters:\n *      start - the basic block where to test the terminating condition. The instruction semantics start at start+1\n *      last - the current last bblock of the instruction \n * \n */\n \ninline void _x86_end_prefix(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId start, IRBasicBlockId& last, int& tmp_var_count){\n    IROperand cx = (mode == CPUMode::X86)? ir_var(X86_ECX, 31, 0): ir_var(X64_RCX, 63, 0);\n    IROperand zf = (mode == CPUMode::X86)? ir_var(X86_ZF, 31, 0): ir_var(X64_ZF, 63, 0);  \n    IROperand tmp;\n    IRBasicBlockId end;\n    \n    if( instr->detail->x86.prefix[0] != X86_PREFIX_REP &&\n        instr->detail->x86.prefix[0] != X86_PREFIX_REPNE ){\n        return;\n    }\n    \n    /* Add loop and cx decrement at the end of the instruction */\n    block->add_instr(last, ir_sub(cx, cx, ir_cst(1, cx.size-1, 0), addr));\n    block->add_instr(last, ir_bcc(ir_cst(1, 31, 0), ir_cst(start, 31, 0), ir_none(), addr));\n    \n    /* Add REP test in the beginning */\n    end = block->new_bblock();\n    if( instr->detail->x86.prefix[0] == X86_PREFIX_REP && _accepts_rep_prefix(instr) ){\n        block->add_instr(start, ir_bcc(cx, ir_cst(start+1, 31, 0), ir_cst(end, 31, 0), addr));\n    }else if( instr->detail->x86.prefix[0] == X86_PREFIX_REP && _accepts_repe_prefix(instr) ){\n        tmp = ir_tmp(tmp_var_count++, 0, 0);\n        block->add_instr(start, ir_bisz(tmp, cx, ir_cst(0, 0, 0), addr));\n        block->add_instr(start, ir_and(tmp, tmp, x86_arg_extract(zf, 0, 0), addr));\n        block->add_instr(start, ir_bcc(tmp, ir_cst(start+1, 31, 0), 
ir_cst(end, 31, 0), addr));\n    }else if( instr->detail->x86.prefix[0] == X86_PREFIX_REPNE ){\n        tmp = ir_tmp(tmp_var_count++, 0, 0);\n        block->add_instr(start, ir_bisz(tmp, cx, ir_cst(1, 0, 0), addr));\n        block->add_instr(start, ir_or(tmp, tmp, x86_arg_extract(zf, 0, 0), addr));\n        block->add_instr(start, ir_bcc(tmp, ir_cst(end, 31, 0), ir_cst(start+1, 31, 0), addr));\n    }\n    \n    last = end; // Update last basic block\n}\n\n\n/* ========================================= */\n/* Instructions translation */\n\n\ninline void x86_aaa_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand   af, eax, cf, tmp0, tmp1, pc;   \n    if( mode == CPUMode::X86 ){\n        eax = ir_var(X86_EAX, 31, 0);\n        af = ir_var(X86_AF, 31, 0);\n        cf = ir_var(X86_CF, 31, 0);\n    }else if( mode == CPUMode::X64 ){\n        throw illegal_instruction_exception(\"X86 AAA instruction is valid only in 32-bit mode\");\n    }\n    tmp0 = ir_tmp(tmp_var_count++, af.size-1, 0), // Get the size from any register \n    tmp1 = ir_tmp(tmp_var_count++, af.size-1, 0);\n    /* If 4 LSB are > 9 or if AF is set then adjust the unpacked BCD values */\n    // (4 LSB) > 9\n    block->add_instr(bblkid, ir_bisz(tmp0, x86_arg_extract(eax, 3, 3), ir_cst(0, eax.size,0), addr));\n    block->add_instr(bblkid, ir_bisz(tmp1, x86_arg_extract(eax, 2, 1), ir_cst(0, eax.size,0), addr));\n    block->add_instr(bblkid, ir_and(tmp1, tmp1, tmp0, addr));\n    // AF\n    block->add_instr(bblkid, ir_or(tmp1, af, tmp1, addr));\n    // Branch depending on condition \n    block->add_instr(bblkid, ir_bcc(tmp1, ir_cst(bblkid+1, 31,0), ir_cst(bblkid+2, 31, 0), addr));\n    // 1°) Branch 1 - Do the adjust \n    bblkid = block->new_bblock();\n    // AL <- AL + 6\n    block->add_instr(bblkid, ir_add(x86_arg_extract(eax, 7, 0), x86_arg_extract(eax, 7, 0), ir_cst(6, 7, 0), addr));\n    // AH ++ \n    block->add_instr(bblkid, 
ir_add(x86_arg_extract(eax, 15, 8), x86_arg_extract(eax, 15, 8), ir_cst(1, 7, 0), addr));\n    // CF <- 1 , AF <- 1\n    block->add_instr(bblkid, ir_mov(af, ir_cst(1, af.size-1, 0), addr));\n    block->add_instr(bblkid, ir_mov(cf, ir_cst(1, af.size-1, 0), addr));\n    // Jump to common end\n    block->add_instr(bblkid, ir_bcc(ir_cst(1, 0, 0), ir_cst(bblkid+2, 31, 0), ir_none(), addr));\n    \n    // 2°) Branch 2 - Just reset flags\n    bblkid = block->new_bblock();\n    block->add_instr(bblkid, ir_mov(af, ir_cst(0, af.size-1, 0), addr));\n    block->add_instr(bblkid, ir_mov(cf, ir_cst(0, af.size-1, 0), addr));\n    // Jump to common end\n    block->add_instr(bblkid, ir_bcc(ir_cst(1, 0, 0), ir_cst(bblkid+1, 31, 0), ir_none(), addr));\n    \n    // 3°) Common end - Keep only 4 LSB of AL\n    bblkid = block->new_bblock();\n    block->add_instr(bblkid, ir_and(x86_arg_extract(eax, 7, 0), x86_arg_extract(eax, 7, 0), ir_cst(0xf, 7, 0), addr));\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_aad_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand  tmp0, imm, al, pc;   \n    if( mode != CPUMode::X86 ){\n        throw illegal_instruction_exception(\"X86 AAD instruction is valid only in 32-bit mode\");\n    }\n    tmp0 = ir_tmp(tmp_var_count++, 7, 0), // Get the size from any register \n    imm = ir_cst(0xa, 7, 0); // 2 byte of the encoded instruction always 0xA for AAD\n    al = ir_var(X86_EAX, 7,0);\n    // AL <- (AL + (AH ∗ imm8)) & 0xFF;\n    // AH <- 0\n    block->add_instr(bblkid, ir_mul(tmp0, ir_var(X86_EAX, 15, 8), imm, addr));\n    block->add_instr(bblkid, ir_add(al, al, tmp0, addr));\n    block->add_instr(bblkid, ir_mov(ir_var(X86_EAX, 15, 8), ir_cst(0, 7, 0), addr));\n    \n    // Set flags : SF, ZF, PF\n    x86_set_sf(mode, al, addr, block, bblkid);\n    
x86_set_zf(mode, al, addr, block, bblkid); \n    x86_set_pf(mode, al, addr, block, bblkid, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    return;\n}\n\ninline void x86_aam_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand  tmp0, imm, al, pc;   \n    if( mode != CPUMode::X86 ){\n        throw illegal_instruction_exception(\"X86 AAM instruction is valid only in 32-bit mode\");\n    }\n    tmp0 = ir_tmp(tmp_var_count++, 7, 0), // Get the size from any register \n    imm = ir_cst(0xa, 7, 0); // 2 byte of the encoded instruction always 0xA for AAM\n    al = ir_var(X86_EAX, 7,0);\n    // AH <- AL / 10\n    // AL <- AL % 10\n    block->add_instr(bblkid, ir_mov(tmp0, al, addr));\n    block->add_instr(bblkid, ir_div(ir_var(X86_EAX, 15, 8), tmp0, imm, addr));\n    block->add_instr(bblkid, ir_mod(al, tmp0, imm, addr));\n    \n    // Set flags : SF, ZF, PF\n    x86_set_sf(mode, al, addr, block, bblkid);\n    x86_set_zf(mode, al, addr, block, bblkid); \n    x86_set_pf(mode, al, addr, block, bblkid, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    return;\n}\n\n\ninline void x86_aas_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand   af, eax, cf, tmp0, tmp1, pc;   \n    if( mode == CPUMode::X86 ){\n        eax = ir_var(X86_EAX, 31, 0);\n        af = ir_var(X86_AF, 31, 0);\n        cf = ir_var(X86_CF, 31, 0);\n    }else if( mode == CPUMode::X64 ){\n        throw illegal_instruction_exception(\"X86 AAS instruction is valid only in 32-bit mode\");\n    }\n    tmp0 = ir_tmp(tmp_var_count++, af.size-1, 0), // Get the size from any register \n    tmp1 = ir_tmp(tmp_var_count++, af.size-1, 0);\n    /* If 4 LSB are > 9 or if 
AF is set then adjust the unpacked BCD values */\n    // (4 LSB) > 9\n    block->add_instr(bblkid, ir_bisz(tmp0, x86_arg_extract(eax, 3, 3), ir_cst(0, eax.size,0), addr));\n    block->add_instr(bblkid, ir_bisz(tmp1, x86_arg_extract(eax, 2, 1), ir_cst(0, eax.size,0), addr));\n    block->add_instr(bblkid, ir_and(tmp1, tmp1, tmp0, addr));\n    // AF\n    block->add_instr(bblkid, ir_or(tmp1, af, tmp1, addr));\n    // Branch depending on condition \n    block->add_instr(bblkid, ir_bcc(tmp1, ir_cst(bblkid+1, 31,0), ir_cst(bblkid+2, 31, 0), addr));\n    // 1°) Branch 1 - Do the adjust \n    bblkid = block->new_bblock();\n    // AL <- AL - 6\n    block->add_instr(bblkid, ir_sub(x86_arg_extract(eax, 7, 0), x86_arg_extract(eax, 7, 0), ir_cst(6, 7, 0), addr));\n    // AH    \n    block->add_instr(bblkid, ir_sub(x86_arg_extract(eax, 15, 8), x86_arg_extract(eax, 15, 8), ir_cst(1, 7, 0), addr));\n    // CF <- 1 , AF <- 1\n    block->add_instr(bblkid, ir_mov(af, ir_cst(1, af.size-1, 0), addr));\n    block->add_instr(bblkid, ir_mov(cf, ir_cst(1, af.size-1, 0), addr));\n    // Jump to common end\n    block->add_instr(bblkid, ir_bcc(ir_cst(1, 0, 0), ir_cst(bblkid+2, 31, 0), ir_none(), addr));\n    \n    // 2°) Branch 2 - Just reset flags\n    bblkid = block->new_bblock();\n    block->add_instr(bblkid, ir_mov(af, ir_cst(0, af.size-1, 0), addr));\n    block->add_instr(bblkid, ir_mov(cf, ir_cst(0, af.size-1, 0), addr));\n    // Jump to common end\n    block->add_instr(bblkid, ir_bcc(ir_cst(1, 0, 0), ir_cst(bblkid+1, 31, 0), ir_none(), addr));\n    \n    // 3°) Common end - Keep only 4 LSB of AL\n    bblkid = block->new_bblock();\n    block->add_instr(bblkid, ir_and(x86_arg_extract(eax, 7, 0), x86_arg_extract(eax, 7, 0), ir_cst(0xf, 7, 0), addr));\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_adc_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* 
block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand op0, op1, dest, res, prev_cf, pc;\n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    }else{\n        op0 = dest;\n    }\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    res = ir_tmp(tmp_var_count++, (instr->detail->x86.operands[0].size*8)-1, 0);\n    if( mode == CPUMode::X86 )\n        prev_cf = ir_var(X86_CF, res.size-1, 0);\n    else if( mode == CPUMode::X64 )\n        prev_cf = ir_var(X64_CF, res.size-1, 0);\n    /* Do the add */\n    block->add_instr(bblkid, ir_add(res, op0, op1, addr));\n    block->add_instr(bblkid, ir_add(res, res, prev_cf, addr));\n    \n    /* Update flags */\n    x86_set_zf(mode, res, addr, block, bblkid);\n    x86_add_set_cf(mode, op0, op1, res, addr, block, bblkid, tmp_var_count);\n    x86_add_set_af(mode, op0, op1, res, addr, block, bblkid, tmp_var_count);\n    x86_add_set_of(mode, op0, op1, res, addr, block, bblkid, tmp_var_count);\n    x86_set_sf(mode, res, addr, block, bblkid);\n    x86_set_pf(mode, res, addr, block, bblkid, tmp_var_count);\n    \n    /* Finally assign the result to the destination */ \n    /* If the add is written in memory */\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(bblkid, ir_stm(dest, res, addr));\n    /* Else direct register assign */\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, res);\n    }\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_adcx_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* 
block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand op0, op1, dest, res, prev_cf, pc;\n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    }else{\n        op0 = dest;\n    }\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    res = ir_tmp(tmp_var_count++, (instr->detail->x86.operands[0].size*8)-1, 0);\n    if( mode == CPUMode::X86 )\n        prev_cf = ir_var(X86_CF, res.size-1, 0);\n    else if( mode == CPUMode::X64 )\n        prev_cf = ir_var(X64_CF, res.size-1, 0);\n    /* Do the add */\n    block->add_instr(bblkid, ir_add(res, op0, op1, addr));\n    block->add_instr(bblkid, ir_add(res, res, prev_cf, addr));\n    \n    /* Update flags */\n    x86_add_set_cf(mode, op0, op1, res, addr, block, bblkid, tmp_var_count);\n    \n    /* Finally assign the result to the destination */ \n    /* ADCX destination is always a general purpose reg */\n    x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, res);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_add_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand op0, op1, dest, res, pc;\n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    }else{\n        op0 = dest;\n    }\n    op1 = x86_arg_translate(mode, addr, 
&(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    /* Do the add */\n    res = ir_tmp(tmp_var_count++, (instr->detail->x86.operands[0].size*8)-1, 0);\n    block->add_instr(bblkid, ir_add(res, op0, op1, addr));\n    \n    /* Update flags */\n    x86_set_zf(mode, res, addr, block, bblkid);\n    x86_add_set_cf(mode, op0, op1, res, addr, block, bblkid, tmp_var_count);\n    x86_add_set_af(mode, op0, op1, res, addr, block, bblkid, tmp_var_count);\n    x86_add_set_of(mode, op0, op1, res, addr, block, bblkid, tmp_var_count);\n    x86_set_sf(mode, res, addr, block, bblkid);\n    x86_set_pf(mode, res, addr, block, bblkid, tmp_var_count);\n    \n    /* Finally assign the result to the destination */ \n    /* If the add is written in memory */\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(bblkid, ir_stm(dest, res, addr));\n    /* Else direct register assign */\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, res);\n    }\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_and_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand op0, op1, dest, res, of, cf, pc;\n    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    cf = (mode == CPUMode::X86)? 
ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    }else{\n        op0 = dest;\n    }\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    /* Do the and */\n    res = ir_tmp(tmp_var_count++, (instr->detail->x86.operands[0].size*8)-1, 0);\n    block->add_instr(bblkid, ir_and(res, op0, op1, addr));\n    \n    /* Update flags: SF, ZF, PF */\n    x86_set_zf(mode, res, addr, block, bblkid);\n    x86_set_sf(mode, res, addr, block, bblkid);\n    x86_set_pf(mode, res, addr, block, bblkid, tmp_var_count);\n    /* OF and CF cleared */\n    block->add_instr(bblkid, ir_mov(of, ir_cst(0, of.high, of.low), addr));\n    block->add_instr(bblkid, ir_mov(cf, ir_cst(0, cf.high, cf.low), addr));\n    \n    /* Finally assign the result to the destination */ \n    /* If the add is written in memory */\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(bblkid, ir_stm(dest, res, addr));\n    /* Else direct register assign */\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, res);\n    }\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_andn_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand op0, op1, dest, res, of, cf, pc;\n    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    cf = (mode == CPUMode::X86)? 
ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[2]), block, bblkid, tmp_var_count, true);\n    /* Do the not then the and */\n    res = ir_tmp(tmp_var_count++, (instr->detail->x86.operands[0].size*8)-1, 0);\n    block->add_instr(bblkid, ir_not(res, op0, addr));\n    block->add_instr(bblkid, ir_and(res, res, op1, addr));\n    \n    /* Update flags: SF, ZF */\n    x86_set_zf(mode, res, addr, block, bblkid);\n    x86_set_sf(mode, res, addr, block, bblkid);\n    /* OF and CF cleared */\n    block->add_instr(bblkid, ir_mov(of, ir_cst(0, of.high, of.low), addr));\n    block->add_instr(bblkid, ir_mov(cf, ir_cst(0, cf.high, cf.low), addr));\n    \n    /* Finally assign the result to the destination */ \n    x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, res);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_blsi_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand op0, op1, dest, res, of, cf, pc;\n    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    cf = (mode == CPUMode::X86)? 
ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    /* Do the neg then the and */\n    res = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(bblkid, ir_neg(res, op0, addr));\n    block->add_instr(bblkid, ir_and(res, res, op0, addr));\n    \n    /* Update flags: SF, ZF */\n    x86_set_zf(mode, res, addr, block, bblkid);\n    x86_set_sf(mode, res, addr, block, bblkid);\n    /* CF set if the source operand (op0) is not zero */\n    block->add_instr(bblkid, ir_bisz(cf, op0, ir_cst(0, cf.size-1, 0), addr));\n    /* OF cleared */\n    block->add_instr(bblkid, ir_mov(of, ir_cst(0, of.high, of.low), addr));\n    \n    /* Finally assign the result to the destination */ \n    x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, res);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_blsmsk_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand op0, op1, dest, res, of, cf, zf, pc;\n    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    cf = (mode == CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    zf = (mode == CPUMode::X86)? 
ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    /* res <- (op0-1) XOR op0 */\n    res = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(bblkid, ir_sub(res, op0, ir_cst(1, op0.size-1, 0), addr));\n    block->add_instr(bblkid, ir_xor(res, res, op0, addr));\n    \n    /* Update flags: SF */\n    x86_set_sf(mode, res, addr, block, bblkid);\n    /* CF set if the source operand (op0) is zero */\n    block->add_instr(bblkid, ir_bisz(cf, op0, ir_cst(1, cf.size-1, 0), addr));\n    /* OF and ZF cleared */\n    block->add_instr(bblkid, ir_mov(of, ir_cst(0, of.high, of.low), addr));\n    block->add_instr(bblkid, ir_mov(zf, ir_cst(0, of.high, of.low), addr));\n    \n    /* Finally assign the result to the destination */ \n    x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, res);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_blsr_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand op0, op1, dest, res, of, cf, zf, pc;\n    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    cf = (mode == CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    zf = (mode == CPUMode::X86)? 
ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    /* res <- (op0-1) AND op0 */\n    res = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(bblkid, ir_sub(res, op0, ir_cst(1, op0.size-1, 0), addr));\n    block->add_instr(bblkid, ir_and(res, res, op0, addr));\n    \n    /* Update flags: SF, ZF */\n    x86_set_sf(mode, res, addr, block, bblkid);\n    x86_set_zf(mode, res, addr, block, bblkid);\n    /* CF set if the source operand (op0) is zero */\n    block->add_instr(bblkid, ir_bisz(cf, op0, ir_cst(1, cf.size-1, 0), addr));\n    /* OF cleared */\n    block->add_instr(bblkid, ir_mov(of, ir_cst(0, of.high, of.low), addr));\n    \n    /* Finally assign the result to the destination */\n    x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, res);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_bsf_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand dest, op0, counter, tmp0, zf, pc;\n    IRBasicBlockId loop_test, loop_body, loop_exit, op_is_zero, op_not_zero, end;\n    zf = (mode == CPUMode::X86)? 
ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0); \n    \n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    op_not_zero = block->new_bblock();\n    loop_test = block->new_bblock();\n    loop_body = block->new_bblock();\n    loop_exit = block->new_bblock();\n    op_is_zero = block->new_bblock();\n    end = block->new_bblock();\n    \n    // Update PC first because then we don't know what branch we take\n    pc = x86_get_pc(mode);\n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    // op0 == 0 ??\n    block->add_instr(bblkid, ir_bcc(op0, ir_cst(op_not_zero, 31, 0), ir_cst(op_is_zero, 31, 0), addr));\n    // 1°) Branch1 : op_not_zero\n    counter = ir_tmp(tmp_var_count++, dest.size-1, 0);\n    tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(op_not_zero, ir_mov(counter, ir_cst(0, counter.size-1, 0), addr)); // counter <- 0\n    block->add_instr(op_not_zero, ir_bcc(ir_cst(1, 31, 0) , ir_cst(loop_test, 31, 0), ir_none(), addr));\n    // loop test: while ( op0[i] == 0 )\n    block->add_instr(loop_test, ir_shr(tmp0, op0, counter, addr));\n    block->add_instr(loop_test, ir_bcc(x86_arg_extract(tmp0,0,0) , ir_cst(loop_exit, 31, 0), ir_cst(loop_body, 31, 0), addr));\n    // loop body: counter = counter + 1\n    block->add_instr(loop_body, ir_add(counter, counter, ir_cst(1, counter.size-1, 0), addr));\n    block->add_instr(loop_body, ir_bcc(ir_cst(1, 31, 0) , ir_cst(loop_test, 31, 0), ir_none(), addr));\n    // loop exit: dest <- counter  and ZF <- 0\n    x86_adjust_reg_assign(mode, addr, block, loop_exit, tmp_var_count, dest, counter);\n    x86_set_zf(mode, op0, addr, block, loop_exit );\n    block->add_instr(loop_exit, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), 
ir_none(), addr));\n    // 2°) Branch2: op_is_zero\n    // ZF <- 1\n    block->add_instr(op_is_zero, ir_mov(zf, ir_cst(1, zf.size-1, 0), addr));\n    block->add_instr(op_is_zero, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    bblkid = end;\n    return;\n}\n\ninline void x86_bsr_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand dest, op0, counter, tmp0, zf, pc;\n    IRBasicBlockId loop_test, loop_body, loop_exit, op_is_zero, op_not_zero, end;\n    zf = (mode == CPUMode::X86)? ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0); \n    \n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    op_not_zero = block->new_bblock();\n    loop_test = block->new_bblock();\n    loop_body = block->new_bblock();\n    loop_exit = block->new_bblock();\n    op_is_zero = block->new_bblock();\n    end = block->new_bblock();\n    \n    // Update PC first because then we don't know what branch we take\n    pc = x86_get_pc(mode);\n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    // op0 == 0 ??\n    block->add_instr(bblkid, ir_bcc(op0, ir_cst(op_not_zero, 31, 0), ir_cst(op_is_zero, 31, 0), addr));\n    // 1°) Branch1 : op_not_zero\n    counter = ir_tmp(tmp_var_count++, dest.size-1, 0);\n    tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(op_not_zero, ir_mov(counter, ir_cst((dest.size-1), counter.size-1, 0), addr)); // counter <- sizeof(op0)-1\n    block->add_instr(op_not_zero, ir_bcc(ir_cst(1, 31, 0) , ir_cst(loop_test, 31, 0), ir_none(), addr));\n    // loop test: while ( op0[i] == 0 )\n    block->add_instr(loop_test, ir_shr(tmp0, op0, counter, addr));\n    block->add_instr(loop_test, 
ir_bcc(x86_arg_extract(tmp0,0,0) , ir_cst(loop_exit, 31, 0), ir_cst(loop_body, 31, 0), addr));\n    // loop body: counter = counter - 1\n    block->add_instr(loop_body, ir_sub(counter, counter, ir_cst(1, counter.size-1, 0), addr));\n    block->add_instr(loop_body, ir_bcc(ir_cst(1, 31, 0) , ir_cst(loop_test, 31, 0), ir_none(), addr));\n    // loop exit: dest <- counter  and ZF <- 0\n    x86_adjust_reg_assign(mode, addr, block, loop_exit, tmp_var_count, dest, counter);\n    x86_set_zf(mode, op0, addr, block, loop_exit );\n    block->add_instr(loop_exit, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // 2°) Branch2: op0 == 0\n    // ZF <- 1\n    block->add_instr(op_is_zero, ir_mov(zf, ir_cst(1, zf.size-1, 0), addr));\n    block->add_instr(op_is_zero, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    bblkid = end;\n    return;\n}\n\ninline void x86_bswap_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand dest, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, pc, res;\n    /* Get operand */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    if( dest.size == 64 ){\n        res = ir_tmp(tmp_var_count++, 63, 0);\n        tmp0 = ir_tmp(tmp_var_count++, 7, 0);\n        tmp1 = ir_tmp(tmp_var_count++, 7, 0);\n        tmp2 = ir_tmp(tmp_var_count++, 7, 0);\n        tmp3 = ir_tmp(tmp_var_count++, 7, 0);\n        tmp4 = ir_tmp(tmp_var_count++, 7, 0);\n        tmp5 = ir_tmp(tmp_var_count++, 7, 0);\n        tmp6 = ir_tmp(tmp_var_count++, 7, 0);\n        tmp7 = ir_tmp(tmp_var_count++, 7, 0);\n        block->add_instr(bblkid, ir_mov(tmp0, x86_arg_extract(dest, 7, 0), addr));\n        block->add_instr(bblkid, ir_mov(tmp1, x86_arg_extract(dest, 15, 8), addr));\n        block->add_instr(bblkid, ir_mov(tmp2, x86_arg_extract(dest, 23, 16), addr));\n        block->add_instr(bblkid, ir_mov(tmp3, x86_arg_extract(dest, 
31, 24), addr));\n        block->add_instr(bblkid, ir_mov(tmp4, x86_arg_extract(dest, 39,32), addr));\n        block->add_instr(bblkid, ir_mov(tmp5, x86_arg_extract(dest, 47, 40), addr));\n        block->add_instr(bblkid, ir_mov(tmp6, x86_arg_extract(dest, 55, 48), addr));\n        block->add_instr(bblkid, ir_mov(tmp7, x86_arg_extract(dest, 63, 56), addr));\n        block->add_instr(bblkid, ir_mov(x86_arg_extract(res, 63, 56), tmp0, addr));\n        block->add_instr(bblkid, ir_mov(x86_arg_extract(res, 55, 48), tmp1, addr));\n        block->add_instr(bblkid, ir_mov(x86_arg_extract(res, 47, 40), tmp2, addr));\n        block->add_instr(bblkid, ir_mov(x86_arg_extract(res, 39, 32), tmp3, addr));\n        block->add_instr(bblkid, ir_mov(x86_arg_extract(res, 31, 24), tmp4, addr));\n        block->add_instr(bblkid, ir_mov(x86_arg_extract(res, 23, 16), tmp5, addr));\n        block->add_instr(bblkid, ir_mov(x86_arg_extract(res, 15, 8), tmp6, addr));\n        block->add_instr(bblkid, ir_mov(x86_arg_extract(res, 7, 0), tmp7, addr));\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, res);\n    }else if( dest.size == 32 ){\n        res = ir_tmp(tmp_var_count++, 31, 0);\n        tmp0 = ir_tmp(tmp_var_count++, 7, 0);\n        tmp1 = ir_tmp(tmp_var_count++, 7, 0);\n        tmp2 = ir_tmp(tmp_var_count++, 7, 0);\n        tmp3 = ir_tmp(tmp_var_count++, 7, 0);\n        block->add_instr(bblkid, ir_mov(tmp0, x86_arg_extract(dest, 7, 0), addr));\n        block->add_instr(bblkid, ir_mov(tmp1, x86_arg_extract(dest, 15, 8), addr));\n        block->add_instr(bblkid, ir_mov(tmp2, x86_arg_extract(dest, 23, 16), addr));\n        block->add_instr(bblkid, ir_mov(tmp3, x86_arg_extract(dest, 31, 24), addr));\n        block->add_instr(bblkid, ir_mov(x86_arg_extract(res, 31, 24), tmp0, addr));\n        block->add_instr(bblkid, ir_mov(x86_arg_extract(res, 23, 16), tmp1, addr));\n        block->add_instr(bblkid, ir_mov(x86_arg_extract(res, 15, 8), tmp2, addr));\n        
block->add_instr(bblkid, ir_mov(x86_arg_extract(res, 7, 0), tmp3, addr));\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, res);\n    }else{\n        throw runtime_exception(\"X86 BSWAP translation: accepts operands of size 32 or 64 only\");\n    }\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n}\n\n\ninline void x86_bt_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand base, off, cf, pc, tmp0;\n    cf = (mode == CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    /* Get operands */\n    base = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    off = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    tmp0 = ir_tmp(tmp_var_count++, base.size-1, 0);\n    \n    /* cf <- bit(base, off % {16/32/64})   */\n    block->add_instr(bblkid, ir_mod(tmp0, off, ir_cst(base.size, off.size-1, 0), addr));\n    block->add_instr(bblkid, ir_shr(tmp0, base, tmp0, addr));\n    block->add_instr(bblkid, ir_bisz(cf, x86_arg_extract(tmp0,0,0), ir_cst(0, cf.size-1, 0), addr));\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_btc_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand dest, base, off, cf, pc, tmp0, tmp1;\n    cf = (mode == CPUMode::X86)? 
ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    base = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    off = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    tmp0 = ir_tmp(tmp_var_count++, base.size-1, 0);\n    tmp1 = ir_tmp(tmp_var_count++, base.size-1, 0);\n    \n    /* cf <- bit(base, off % {16/32/64})   */\n    block->add_instr(bblkid, ir_mod(tmp0, off, ir_cst(base.size, off.size-1, 0), addr));\n    block->add_instr(bblkid, ir_shr(tmp1, base, tmp0, addr));\n    block->add_instr(bblkid, ir_bisz(cf, x86_arg_extract(tmp1,0,0), ir_cst(0, cf.size-1, 0), addr));\n    /* invert bit(base, off % ... )*/\n    block->add_instr(bblkid, ir_shl(tmp1, ir_cst(1, tmp0.size-1, 0), tmp0, addr));\n    block->add_instr(bblkid, ir_xor(tmp1, base, tmp1, addr));\n    \n    /* Set the bit in the destination */ \n    /* If the add is written in memory */\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(bblkid, ir_stm(dest, tmp1, addr));\n    /* Else direct register assign */\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, tmp1);\n    }\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_btr_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand dest, base, off, cf, pc, tmp0, tmp1;\n    cf = (mode == CPUMode::X86)? 
ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    base = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    off = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    tmp0 = ir_tmp(tmp_var_count++, base.size-1, 0);\n    tmp1 = ir_tmp(tmp_var_count++, base.size-1, 0);\n    \n    /* cf <- bit(base, off % {16/32/64})   */\n    block->add_instr(bblkid, ir_mod(tmp0, off, ir_cst(base.size, off.size-1, 0), addr));\n    block->add_instr(bblkid, ir_shr(tmp1, base, tmp0, addr));\n    block->add_instr(bblkid, ir_bisz(cf, x86_arg_extract(tmp1,0,0), ir_cst(0, cf.size-1, 0), addr));\n    /* bit(base, off % ... ) <- 0 */\n    block->add_instr(bblkid, ir_shl(tmp1, ir_cst(1, tmp0.size-1, 0), tmp0, addr));\n    block->add_instr(bblkid, ir_not(tmp1, tmp1, addr));\n    block->add_instr(bblkid, ir_and(tmp1, base, tmp1, addr));\n    \n    /* Set the bit in the destination */ \n    /* If the add is written in memory */\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(bblkid, ir_stm(dest, tmp1, addr));\n    /* Else direct register assign */\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, tmp1);\n    }\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_bts_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand dest, base, off, cf, pc, tmp0, tmp1;\n    cf = (mode == CPUMode::X86)? 
ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    base = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    off = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    tmp0 = ir_tmp(tmp_var_count++, base.size-1, 0);\n    tmp1 = ir_tmp(tmp_var_count++, base.size-1, 0);\n    \n    /* cf <- bit(base, off % {16/32/64})   */\n    block->add_instr(bblkid, ir_mod(tmp0, off, ir_cst(base.size, off.size-1, 0), addr));\n    block->add_instr(bblkid, ir_shr(tmp1, base, tmp0, addr));\n    block->add_instr(bblkid, ir_bisz(cf, x86_arg_extract(tmp1,0,0), ir_cst(0, cf.size-1, 0), addr));\n    /* bit(base, off % ... ) <- 1 */\n    block->add_instr(bblkid, ir_shl(tmp1, ir_cst(1, tmp0.size-1, 0), tmp0, addr));\n    block->add_instr(bblkid, ir_or(tmp1, base, tmp1, addr));\n    \n    /* Set the bit in the destination */ \n    /* If the add is written in memory */\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(bblkid, ir_stm(dest, tmp1, addr));\n    /* Else direct register assign */\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, tmp1);\n    }\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_bzhi_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand dest, op0, op1, cf, of, pc, index, tmp0, tmp1, opsize, res;\n    IRBasicBlockId  index_too_big = block->new_bblock(), \n                    index_ok = block->new_bblock(),\n                    end = block->new_bblock(); \n    /* Get operands */\n    cf = (mode == CPUMode::X86)? 
ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[2]), block, bblkid, tmp_var_count, true);\n    index = ir_tmp(tmp_var_count++, dest.size-1, 0);\n    tmp0 = ir_tmp(tmp_var_count++, dest.size-1, 0);\n    res = ir_tmp(tmp_var_count++, dest.size-1, 0);\n    \n    /* index <- op1[7:0]   \n     * dest <- op0\n     * dest[size(dest)-1:index] <- 0 \n     * cf = 1 iff index > size(dest)-1 \n     */\n    // Get index\n    block->add_instr(bblkid, ir_mov(index, op1, addr));\n    block->add_instr(bblkid, ir_and(index, index, ir_cst(0xff, index.size-1, 0), addr));\n    // Compare index and size operands\n    opsize = ir_cst(dest.size, dest.size-1, 0);\n    block->add_instr(bblkid, ir_sub(tmp0, opsize, ir_cst(1, opsize.size-1, 0), addr));\n    block->add_instr(bblkid, ir_sub(tmp0, tmp0, index, addr));\n    block->add_instr(bblkid, ir_bcc(x86_arg_extract(tmp0, tmp0.size-1, tmp0.size-1), \n                                    ir_cst(index_too_big, 31, 0),\n                                    ir_cst(index_ok, 31, 0),\n                                    addr));\n    // 1°) Index > size operands -1\n    block->add_instr(index_too_big, ir_mov(cf, ir_cst(1, cf.size-1, 0), addr));\n    block->add_instr(index_too_big, ir_mov(res, op0, addr));\n    x86_adjust_reg_assign(mode, addr, block, index_too_big, tmp_var_count, dest, res);\n    block->add_instr(index_too_big, ir_bcc(ir_cst(1, 31, 0), ir_cst(end, 31, 0), ir_none(), addr));\n\n    // 2°) Index < size operands\n    tmp1 = ir_tmp(tmp_var_count++, dest.size-1, 0);\n    block->add_instr(index_ok, ir_mov(cf, ir_cst(0, cf.size-1, 0), addr ));\n    // Get mask size(dest)-1 .. 
index\n    block->add_instr(index_ok, ir_shl(tmp0, ir_cst(1, index.size-1, 0), index, addr));\n    block->add_instr(index_ok, ir_neg(tmp1, tmp0, addr));\n    block->add_instr(index_ok, ir_or(tmp1, tmp1, tmp0, addr));\n    block->add_instr(index_ok, ir_not(tmp1, tmp1, addr));\n    // Mask res \n    block->add_instr(index_ok, ir_mov(res, dest, addr));\n    block->add_instr(index_ok, ir_and(res, op0, tmp1, addr));\n    x86_adjust_reg_assign(mode, addr, block, index_ok, tmp_var_count, dest, res);\n    block->add_instr(index_ok, ir_bcc(ir_cst(1, 31, 0), ir_cst(end, 31, 0), ir_none(), addr));\n\n    // 3° ) Common end: set flags and pc\n    // OF cleared \n    block->add_instr(end, ir_mov(of, ir_cst(0, of.size-1, 0), addr ));\n    // Set zf, cf\n    x86_set_sf(mode, res, addr, block, end);\n    x86_set_zf(mode, res, addr, block, end);\n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n\n    bblkid = end;\n    return;\n}\n\ninline void x86_call_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand op0, sp, pc;\n    \n    /* Increment program counter first because\n     * the call is maybe relative to EIP/RIP */\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.high, 0), addr));\n\n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    sp = (mode == CPUMode::X86)? 
ir_var(X86_ESP, 31, 0): ir_var(X64_RSP, 63, 0); \n    \n    /* Get and push next instruction address */\n    block->add_instr(bblkid, ir_sub(sp, sp, ir_cst(pc.size/8, pc.size-1, 0), addr));\n    block->add_instr(bblkid, ir_stm(sp, pc, addr));\n    \n    /* Jump to called address */\n    block->add_instr(bblkid, ir_jcc(ir_cst(1, pc.size-1, 0), op0, ir_none(), addr));\n    \n    return;\n}\n\ninline void x86_cbw_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand tmp0, reg, pc;\n    IRBasicBlockId ext0 = block->new_bblock(), \n                 ext1 = block->new_bblock(),\n                 end = block->new_bblock();\n    reg = (mode==CPUMode::X86)? ir_var(X86_EAX, 31, 0) : ir_var(X64_RAX, 63, 0);\n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    /* ax <- sign_extend(al)   */\n    block->add_instr(bblkid, ir_bcc(x86_arg_extract(reg, 7, 7), ir_cst(ext1, 31, 0), ir_cst(ext0, 31, 0), addr));\n    // extend 1\n    block->add_instr(ext1, ir_mov(x86_arg_extract(reg, 15, 8), ir_cst(0xff, 7, 0), addr));\n    block->add_instr(ext1, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // extend 0\n    block->add_instr(ext0, ir_mov(x86_arg_extract(reg, 15, 8), ir_cst(0x0, 7, 0), addr));\n    block->add_instr(ext0, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_cdq_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand tmp0, reg_a, reg_d, pc, cst0, cst1;\n    IRBasicBlockId ext0 = block->new_bblock(), \n                 ext1 = block->new_bblock(),\n                 end = block->new_bblock();\n    reg_a = (mode==CPUMode::X86)? ir_var(X86_EAX, 31, 0) : ir_var(X64_RAX, 63, 0);\n    reg_d = (mode==CPUMode::X86)? 
ir_var(X86_EDX, 31, 0) : ir_var(X64_RDX, 63, 0);\n    reg_d = x86_arg_extract(reg_d, 31, 0);\n    cst1 = ir_cst(0xffffffff, 31, 0);\n    cst0 = ir_cst(0, 31, 0);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    /* edx <- replicate(eax[31])   */\n    block->add_instr(bblkid, ir_bcc(x86_arg_extract(reg_a, 31, 31), ir_cst(ext1, 31, 0), ir_cst(ext0, 31, 0), addr));\n    // extend 1\n    x86_adjust_reg_assign(mode, addr, block, ext1, tmp_var_count, reg_d, cst1);\n    block->add_instr(ext1, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // extend 0\n    x86_adjust_reg_assign(mode, addr, block, ext0, tmp_var_count, reg_d, cst0);\n    block->add_instr(ext0, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_cdqe_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand tmp0, reg, pc;\n    if( mode == CPUMode::X86 ){\n        throw runtime_exception(\"CDQE: invalid instruction in X86 mode\");\n    }\n    \n    IRBasicBlockId ext0 = block->new_bblock(), \n                 ext1 = block->new_bblock(),\n                 end = block->new_bblock();\n    reg = ir_var(X64_RAX, 63, 0);\n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    /* rax <- sign_extend(eax)   */\n    block->add_instr(bblkid, ir_bcc(x86_arg_extract(reg, 31, 31), ir_cst(ext1, 31, 0), ir_cst(ext0, 31, 0), addr));\n    // extend 1\n    block->add_instr(ext1, ir_mov(x86_arg_extract(reg, 63, 32), ir_cst(0xffffffff, 31, 0), addr));\n    block->add_instr(ext1, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // extend 0\n    block->add_instr(ext0, ir_mov(x86_arg_extract(reg, 63, 32), ir_cst(0x0, 31, 0), addr));\n    block->add_instr(ext0, 
ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_clc_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand cf, pc;\n    cf = (mode==CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // cf <- 0\n    block->add_instr(bblkid, ir_mov(cf, ir_cst(0x0, cf.size-1, 0), addr));\n    return;\n}\n\ninline void x86_cld_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand df, pc;\n    df = (mode==CPUMode::X86)? ir_var(X86_DF, 31, 0) : ir_var(X64_DF, 63, 0);\n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // df <- 0\n    block->add_instr(bblkid, ir_mov(df, ir_cst(0x0, df.size-1, 0), addr));\n    return;\n}\n\ninline void x86_cli_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand if_flag, pc;\n    if_flag = (mode==CPUMode::X86)? ir_var(X86_IF, 31, 0) : ir_var(X64_IF, 63, 0);\n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // if_flag <- 0\n    block->add_instr(bblkid, ir_mov(if_flag, ir_cst(0x0, if_flag.size-1, 0), addr));\n    return;\n}\n\ninline void x86_cmc_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand cf, pc;\n    cf = (mode==CPUMode::X86)? 
ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // complement cf\n    block->add_instr(bblkid, ir_xor(cf, cf, ir_cst(0x1, cf.size-1, 0), addr));\n    return;\n}\n\ninline void x86_cmova_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand cf, zf, pc, tmp0, tmp1, op0, op1;\n    IRBasicBlockId  do_mov = block->new_bblock(),\n                    dont_mov = block->new_bblock(),\n                    end = block->new_bblock();\n    cf = (mode==CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    zf = (mode==CPUMode::X86)? ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    tmp0 = ir_tmp(tmp_var_count++, 0, 0);\n    tmp1 = ir_tmp(tmp_var_count++, 0, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if CF = 0 and ZF = 0 \n    block->add_instr(bblkid, ir_not(tmp0, cf, addr));\n    block->add_instr(bblkid, ir_not(tmp1, zf, addr));\n    block->add_instr(bblkid, ir_and(tmp1, tmp1, tmp0, addr));\n    block->add_instr(bblkid, ir_bcc(tmp1, ir_cst(do_mov,31, 0), ir_cst(dont_mov, 31, 0), addr));\n    // do mov\n    x86_adjust_reg_assign(mode, addr, block, do_mov, tmp_var_count, op0, op1);\n    block->add_instr(do_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont mov - do nothing\n    /* but still assign op0 to itself if 64 bits (because in 64 bits, the instruction \n     * clears the upper bits when operands are 32 bits, even when the condition\n     * is false */\n    if( mode == 
CPUMode::X64 && op0.size == 32 ){\n        x86_adjust_reg_assign(mode, addr, block, dont_mov, tmp_var_count, op0, op0);\n    }\n    block->add_instr(dont_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_cmovae_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand cf, pc, op0, op1;\n    IRBasicBlockId  do_mov = block->new_bblock(),\n                    dont_mov = block->new_bblock(),\n                    end = block->new_bblock();\n    cf = (mode==CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if CF = 0\n    block->add_instr(bblkid, ir_bcc(cf, ir_cst(dont_mov,31, 0), ir_cst(do_mov, 31, 0), addr));\n    // do mov\n    x86_adjust_reg_assign(mode, addr, block, do_mov, tmp_var_count, op0, op1);\n    block->add_instr(do_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont mov - do nothing\n    /* but still assign op0 to itself if 64 bits (because in 64 bits, the instruction \n     * clears the upper bits when operands are 32 bits, even when the condition\n     * is false */\n    if( mode == CPUMode::X64 && op0.size == 32 ){\n        x86_adjust_reg_assign(mode, addr, block, dont_mov, tmp_var_count, op0, op0);\n    }\n    block->add_instr(dont_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_cmovb_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    
IROperand cf, pc, op0, op1;\n    IRBasicBlockId  do_mov = block->new_bblock(),\n                    dont_mov = block->new_bblock(),\n                    end = block->new_bblock();\n    cf = (mode==CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if CF = 1\n    block->add_instr(bblkid, ir_bcc(cf, ir_cst(do_mov,31, 0), ir_cst(dont_mov, 31, 0), addr));\n    // do mov\n    x86_adjust_reg_assign(mode, addr, block, do_mov, tmp_var_count, op0, op1);\n    block->add_instr(do_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont mov - do nothing\n    /* but still assign op0 to itself if 64 bits (because in 64 bits, the instruction \n     * clears the upper bits when operands are 32 bits, even when the condition\n     * is false */\n    if( mode == CPUMode::X64 && op0.size == 32 ){\n        x86_adjust_reg_assign(mode, addr, block, dont_mov, tmp_var_count, op0, op0);\n    }\n    block->add_instr(dont_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_cmovbe_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand cf, zf, pc, tmp0, op0, op1;\n    IRBasicBlockId  do_mov = block->new_bblock(),\n                    dont_mov = block->new_bblock(),\n                    end = block->new_bblock();\n    cf = (mode==CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    zf = (mode==CPUMode::X86)? 
ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    tmp0 = ir_tmp(tmp_var_count++, 0, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if CF = 1 or ZF = 1 \n    block->add_instr(bblkid, ir_or(tmp0, cf, zf, addr));\n    block->add_instr(bblkid, ir_bcc(tmp0, ir_cst(do_mov,31, 0), ir_cst(dont_mov, 31, 0), addr));\n    // do mov\n    x86_adjust_reg_assign(mode, addr, block, do_mov, tmp_var_count, op0, op1);\n    block->add_instr(do_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont mov - do nothing\n    /* but still assign op0 to itself if 64 bits (because in 64 bits, the instruction \n     * clears the upper bits when operands are 32 bits, even when the condition\n     * is false */\n    if( mode == CPUMode::X64 && op0.size == 32 ){\n        x86_adjust_reg_assign(mode, addr, block, dont_mov, tmp_var_count, op0, op0);\n    }\n    block->add_instr(dont_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_cmove_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand zf, pc, op0, op1;\n    IRBasicBlockId  do_mov = block->new_bblock(),\n                    dont_mov = block->new_bblock(),\n                    end = block->new_bblock();\n    zf = (mode==CPUMode::X86)? 
ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if zf = 1\n    block->add_instr(bblkid, ir_bcc(zf, ir_cst(do_mov,31, 0), ir_cst(dont_mov, 31, 0), addr));\n    // do mov\n    x86_adjust_reg_assign(mode, addr, block, do_mov, tmp_var_count, op0, op1);\n    block->add_instr(do_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont mov - do nothing\n    /* but still assign op0 to itself if 64 bits (because in 64 bits, the instruction \n     * clears the upper bits when operands are 32 bits, even when the condition\n     * is false */\n    if( mode == CPUMode::X64 && op0.size == 32 ){\n        x86_adjust_reg_assign(mode, addr, block, dont_mov, tmp_var_count, op0, op0);\n    }\n    block->add_instr(dont_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_cmovg_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand sf, of, zf, pc, tmp0, tmp1, op0, op1;\n    IRBasicBlockId  do_mov = block->new_bblock(),\n                    dont_mov = block->new_bblock(),\n                    end = block->new_bblock();\n    sf = (mode==CPUMode::X86)? ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n    zf = (mode==CPUMode::X86)? ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    of = (mode==CPUMode::X86)? 
ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    tmp0 = ir_tmp(tmp_var_count++, 0, 0);\n    tmp1 = ir_tmp(tmp_var_count++, 0, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if ZF = 0 and OF = SF \n    block->add_instr(bblkid, ir_xor(tmp0, sf, of, addr));\n    block->add_instr(bblkid, ir_not(tmp0, tmp0, addr));\n    block->add_instr(bblkid, ir_not(tmp1, zf, addr));\n    block->add_instr(bblkid, ir_and(tmp1, tmp1, tmp0, addr));\n    block->add_instr(bblkid, ir_bcc(tmp1, ir_cst(do_mov,31, 0), ir_cst(dont_mov, 31, 0), addr));\n    // do mov\n    x86_adjust_reg_assign(mode, addr, block, do_mov, tmp_var_count, op0, op1);\n    block->add_instr(do_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont mov - do nothing\n    /* but still assign op0 to itself if 64 bits (because in 64 bits, the instruction \n     * clears the upper bits when operands are 32 bits, even when the condition\n     * is false */\n    if( mode == CPUMode::X64 && op0.size == 32 ){\n        x86_adjust_reg_assign(mode, addr, block, dont_mov, tmp_var_count, op0, op0);\n    }\n    block->add_instr(dont_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_cmovge_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand sf, of, zf, pc, tmp0, op0, op1;\n    IRBasicBlockId  do_mov = block->new_bblock(),\n                    dont_mov = block->new_bblock(),\n                    end = block->new_bblock();\n    sf = (mode==CPUMode::X86)? 
ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n    of = (mode==CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    tmp0 = ir_tmp(tmp_var_count++, 0, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if OF = SF \n    block->add_instr(bblkid, ir_xor(tmp0, sf, of, addr));\n    block->add_instr(bblkid, ir_not(tmp0, tmp0, addr));\n    block->add_instr(bblkid, ir_bcc(tmp0, ir_cst(do_mov,31, 0), ir_cst(dont_mov, 31, 0), addr));\n    // do mov\n    x86_adjust_reg_assign(mode, addr, block, do_mov, tmp_var_count, op0, op1);\n    block->add_instr(do_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont mov - do nothing\n    /* but still assign op0 to itself if 64 bits (because in 64 bits, the instruction \n     * clears the upper bits when operands are 32 bits, even when the condition\n     * is false */\n    if( mode == CPUMode::X64 && op0.size == 32 ){\n        x86_adjust_reg_assign(mode, addr, block, dont_mov, tmp_var_count, op0, op0);\n    }\n    block->add_instr(dont_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_cmovl_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand sf, of, zf, pc, tmp0, op0, op1;\n    IRBasicBlockId  do_mov = block->new_bblock(),\n                    dont_mov = block->new_bblock(),\n                    end = block->new_bblock();\n    sf = (mode==CPUMode::X86)? ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n    of = (mode==CPUMode::X86)? 
ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    tmp0 = ir_tmp(tmp_var_count++, 0, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if OF != SF \n    block->add_instr(bblkid, ir_xor(tmp0, sf, of, addr));\n    block->add_instr(bblkid, ir_bcc(tmp0, ir_cst(do_mov,31, 0), ir_cst(dont_mov, 31, 0), addr));\n    // do mov\n    x86_adjust_reg_assign(mode, addr, block, do_mov, tmp_var_count, op0, op1);\n    block->add_instr(do_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont mov - do nothing\n    /* but still assign op0 to itself if 64 bits (because in 64 bits, the instruction \n     * clears the upper bits when operands are 32 bits, even when the condition\n     * is false */\n    if( mode == CPUMode::X64 && op0.size == 32 ){\n        x86_adjust_reg_assign(mode, addr, block, dont_mov, tmp_var_count, op0, op0);\n    }\n    block->add_instr(dont_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_cmovle_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand sf, of, zf, pc, tmp0, op0, op1;\n    IRBasicBlockId  do_mov = block->new_bblock(),\n                    dont_mov = block->new_bblock(),\n                    end = block->new_bblock();\n    sf = (mode==CPUMode::X86)? ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n    zf = (mode==CPUMode::X86)? ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    of = (mode==CPUMode::X86)? 
ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    tmp0 = ir_tmp(tmp_var_count++, 0, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if ZF = 1 or OF != SF \n    block->add_instr(bblkid, ir_xor(tmp0, sf, of, addr));\n    block->add_instr(bblkid, ir_or(tmp0, x86_arg_extract(zf, 0, 0), tmp0, addr));\n    block->add_instr(bblkid, ir_bcc(tmp0, ir_cst(do_mov,31, 0), ir_cst(dont_mov, 31, 0), addr));\n    // do mov\n    x86_adjust_reg_assign(mode, addr, block, do_mov, tmp_var_count, op0, op1);\n    block->add_instr(do_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont mov - do nothing\n    /* but still assign op0 to itself if 64 bits (because in 64 bits, the instruction \n     * clears the upper bits when operands are 32 bits, even when the condition\n     * is false */\n    if( mode == CPUMode::X64 && op0.size == 32 ){\n        x86_adjust_reg_assign(mode, addr, block, dont_mov, tmp_var_count, op0, op0);\n    }\n    block->add_instr(dont_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_cmovne_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand zf, pc, op0, op1;\n    IRBasicBlockId  do_mov = block->new_bblock(),\n                    dont_mov = block->new_bblock(),\n                    end = block->new_bblock();\n    zf = (mode==CPUMode::X86)? 
ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if ZF = 0\n    block->add_instr(bblkid, ir_bcc(zf, ir_cst(dont_mov,31, 0), ir_cst(do_mov, 31, 0), addr));\n    // do mov\n    x86_adjust_reg_assign(mode, addr, block, do_mov, tmp_var_count, op0, op1);\n    block->add_instr(do_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont mov - do nothing\n    /* but still assign op0 to itself if 64 bits (because in 64 bits, the instruction \n     * clears the upper bits when operands are 32 bits, even when the condition\n     * is false */\n    if( mode == CPUMode::X64 && op0.size == 32 ){\n        x86_adjust_reg_assign(mode, addr, block, dont_mov, tmp_var_count, op0, op0);\n    }\n    block->add_instr(dont_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_cmovno_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand of, pc, op0, op1;\n    IRBasicBlockId  do_mov = block->new_bblock(),\n                    dont_mov = block->new_bblock(),\n                    end = block->new_bblock();\n    of = (mode==CPUMode::X86)? 
ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if OF = 0\n    block->add_instr(bblkid, ir_bcc(of, ir_cst(dont_mov,31, 0), ir_cst(do_mov, 31, 0), addr));\n    // do mov\n    x86_adjust_reg_assign(mode, addr, block, do_mov, tmp_var_count, op0, op1);\n    block->add_instr(do_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont mov - do nothing\n    /* but still assign op0 to itself if 64 bits (because in 64 bits, the instruction \n     * clears the upper bits when operands are 32 bits, even when the condition\n     * is false */\n    if( mode == CPUMode::X64 && op0.size == 32 ){\n        x86_adjust_reg_assign(mode, addr, block, dont_mov, tmp_var_count, op0, op0);\n    }\n    block->add_instr(dont_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\n\ninline void x86_cmovnp_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pf, pc, op0, op1;\n    IRBasicBlockId  do_mov = block->new_bblock(),\n                    dont_mov = block->new_bblock(),\n                    end = block->new_bblock();\n    pf = (mode==CPUMode::X86)? 
ir_var(X86_PF, 31, 0) : ir_var(X64_PF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if PF = 0\n    block->add_instr(bblkid, ir_bcc(pf, ir_cst(dont_mov,31, 0), ir_cst(do_mov, 31, 0), addr));\n    // do mov\n    x86_adjust_reg_assign(mode, addr, block, do_mov, tmp_var_count, op0, op1);\n    block->add_instr(do_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont mov - do nothing\n    /* but still assign op0 to itself if 64 bits (because in 64 bits, the instruction \n     * clears the upper bits when operands are 32 bits, even when the condition\n     * is false */\n    if( mode == CPUMode::X64 && op0.size == 32 ){\n        x86_adjust_reg_assign(mode, addr, block, dont_mov, tmp_var_count, op0, op0);\n    }\n    block->add_instr(dont_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_cmovns_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand sf, pc, op0, op1;\n    IRBasicBlockId  do_mov = block->new_bblock(),\n                    dont_mov = block->new_bblock(),\n                    end = block->new_bblock();\n    sf = (mode==CPUMode::X86)? 
ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if SF = 0\n    block->add_instr(bblkid, ir_bcc(sf, ir_cst(dont_mov,31, 0), ir_cst(do_mov, 31, 0), addr));\n    // do mov\n    x86_adjust_reg_assign(mode, addr, block, do_mov, tmp_var_count, op0, op1);\n    block->add_instr(do_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont mov - do nothing\n    /* but still assign op0 to itself if 64 bits (because in 64 bits, the instruction \n     * clears the upper bits when operands are 32 bits, even when the condition\n     * is false */\n    if( mode == CPUMode::X64 && op0.size == 32 ){\n        x86_adjust_reg_assign(mode, addr, block, dont_mov, tmp_var_count, op0, op0);\n    }\n    block->add_instr(dont_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\n\ninline void x86_cmovo_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand of, pc, op0, op1;\n    IRBasicBlockId  do_mov = block->new_bblock(),\n                    dont_mov = block->new_bblock(),\n                    end = block->new_bblock();\n    of = (mode==CPUMode::X86)? 
ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if OF = 1\n    block->add_instr(bblkid, ir_bcc(of, ir_cst(do_mov,31, 0), ir_cst(dont_mov, 31, 0), addr));\n    // do mov\n    x86_adjust_reg_assign(mode, addr, block, do_mov, tmp_var_count, op0, op1);\n    block->add_instr(do_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont mov - do nothing\n    /* but still assign op0 to itself if 64 bits (because in 64 bits, the instruction \n     * clears the upper bits when operands are 32 bits, even when the condition\n     * is false */\n    if( mode == CPUMode::X64 && op0.size == 32 ){\n        x86_adjust_reg_assign(mode, addr, block, dont_mov, tmp_var_count, op0, op0);\n    }\n    block->add_instr(dont_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_cmovp_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pf, pc, op0, op1;\n    IRBasicBlockId  do_mov = block->new_bblock(),\n                    dont_mov = block->new_bblock(),\n                    end = block->new_bblock();\n    pf = (mode==CPUMode::X86)? 
ir_var(X86_PF, 31, 0) : ir_var(X64_PF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if OF = 1\n    block->add_instr(bblkid, ir_bcc(pf, ir_cst(do_mov,31, 0), ir_cst(dont_mov, 31, 0), addr));\n    // do mov\n    x86_adjust_reg_assign(mode, addr, block, do_mov, tmp_var_count, op0, op1);\n    block->add_instr(do_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont mov - do nothing\n    /* but still assign op0 to itself if 64 bits (because in 64 bits, the instruction \n     * clears the upper bits when operands are 32 bits, even when the condition\n     * is false */\n    if( mode == CPUMode::X64 && op0.size == 32 ){\n        x86_adjust_reg_assign(mode, addr, block, dont_mov, tmp_var_count, op0, op0);\n    }\n    block->add_instr(dont_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_cmovs_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand sf, pc, op0, op1;\n    IRBasicBlockId  do_mov = block->new_bblock(),\n                    dont_mov = block->new_bblock(),\n                    end = block->new_bblock();\n    sf = (mode==CPUMode::X86)? 
ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if SF = 1\n    block->add_instr(bblkid, ir_bcc(sf, ir_cst(do_mov,31, 0), ir_cst(dont_mov, 31, 0), addr));\n    // do mov\n    x86_adjust_reg_assign(mode, addr, block, do_mov, tmp_var_count, op0, op1);\n    block->add_instr(do_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont mov - do nothing\n    /* but still assign op0 to itself if 64 bits (because in 64 bits, the instruction \n     * clears the upper bits when operands are 32 bits, even when the condition\n     * is false */\n    if( mode == CPUMode::X64 && op0.size == 32 ){\n        x86_adjust_reg_assign(mode, addr, block, dont_mov, tmp_var_count, op0, op0);\n    }\n    block->add_instr(dont_mov, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_cmp_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, op0, op1, tmp;\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    // Check if op1 is a imm and needs sign extend\n    if( op1.size < op0.size && op1.is_cst()){\n        op1 = ir_cst(cst_sign_extend(op1.size, op1.cst()), op0.size-1, 0);\n    }\n    // tmp <- op0 - op1\n    tmp = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(bblkid, ir_sub(tmp, op0, op1, addr));\n    \n    // 
Update flags\n    x86_set_pf( mode, tmp, addr, block, bblkid, tmp_var_count );\n    x86_set_zf( mode, tmp, addr, block, bblkid );\n    x86_set_sf( mode, tmp, addr, block, bblkid );\n    x86_sub_set_of( mode, op0, op1, tmp, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_cf( mode, op0, op1, tmp, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_af( mode, op0, op1, tmp, addr, block, bblkid, tmp_var_count );\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n   \n    return;\n}\n\ninline void x86_cmpsb_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, tmp0, tmp1, tmp2, si, di, df;\n    IRBasicBlockId inc, dec, end, prefix_start;\n    \n    /* Get operands */\n    si = (mode == CPUMode::X86) ? ir_var(X86_ESI, 31, 0) : ir_var(X64_RSI, 63, 0);\n    di = (mode == CPUMode::X86) ? ir_var(X86_EDI, 31, 0) : ir_var(X64_RDI, 63, 0);\n    df = (mode == CPUMode::X86) ? 
ir_var(X86_DF, 31, 0) : ir_var(X64_DF, 63, 0);\n    \n    prefix_start = _x86_init_prefix(mode, instr, addr, block, bblkid);\n    \n    inc = block->new_bblock();\n    dec = block->new_bblock();\n    end = block->new_bblock();\n    \n    /* Read bytes from memory and compare them */\n    tmp0 = ir_tmp(tmp_var_count++, 7, 0);\n    tmp1 = ir_tmp(tmp_var_count++, 7, 0);\n    tmp2 = ir_tmp(tmp_var_count++, 7, 0);\n    \n    block->add_instr(bblkid, ir_ldm(tmp0, si, addr));\n    block->add_instr(bblkid, ir_ldm(tmp1, di, addr));\n    block->add_instr(bblkid, ir_sub(tmp2, tmp0, tmp1, addr));\n    \n    // Update flags\n    x86_set_pf( mode, tmp2, addr, block, bblkid, tmp_var_count );\n    x86_set_zf( mode, tmp2, addr, block, bblkid );\n    x86_set_sf( mode, tmp2, addr, block, bblkid );\n    x86_sub_set_of( mode, tmp0, tmp1, tmp2, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_cf( mode, tmp0, tmp1, tmp2, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_af( mode, tmp0, tmp1, tmp2, addr, block, bblkid, tmp_var_count );\n    \n    // Increment or decrement ESI/EDI according to DF\n    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, 31, 0), ir_cst(inc, 31, 0), addr));\n    \n    block->add_instr(inc, ir_add(si, si, ir_cst(1, si.size-1, 0), addr));\n    block->add_instr(inc, ir_add(di, di, ir_cst(1, di.size-1, 0), addr));\n    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    block->add_instr(dec, ir_sub(si, si, ir_cst(1, si.size-1, 0), addr));\n    block->add_instr(dec, ir_sub(di, di, ir_cst(1, di.size-1, 0), addr));\n    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    /* Add prefix if any */\n    _x86_end_prefix(mode, instr, addr, block, prefix_start, end, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    bblkid = end;\n    return;\n}\n\ninline void 
x86_cmpsd_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, tmp0, tmp1, tmp2, si, di, df;\n    IRBasicBlockId inc, dec, end, prefix_start;\n    \n    /* Get operands */\n    si = (mode == CPUMode::X86) ? ir_var(X86_ESI, 31, 0) : ir_var(X64_RSI, 63, 0);\n    di = (mode == CPUMode::X86) ? ir_var(X86_EDI, 31, 0) : ir_var(X64_RDI, 63, 0);\n    df = (mode == CPUMode::X86) ? ir_var(X86_DF, 31, 0) : ir_var(X64_DF, 63, 0);\n    \n    prefix_start = _x86_init_prefix(mode, instr, addr, block, bblkid);\n    \n    inc = block->new_bblock();\n    dec = block->new_bblock();\n    end = block->new_bblock();\n    \n    /* Read dwords from memory and compare them */\n    tmp0 = ir_tmp(tmp_var_count++, 31, 0);\n    tmp1 = ir_tmp(tmp_var_count++, 31, 0);\n    tmp2 = ir_tmp(tmp_var_count++, 31, 0);\n    \n    block->add_instr(bblkid, ir_ldm(tmp0, si, addr));\n    block->add_instr(bblkid, ir_ldm(tmp1, di, addr));\n    block->add_instr(bblkid, ir_sub(tmp2, tmp0, tmp1, addr));\n    \n    // Update flags\n    x86_set_pf( mode, tmp2, addr, block, bblkid, tmp_var_count );\n    x86_set_zf( mode, tmp2, addr, block, bblkid );\n    x86_set_sf( mode, tmp2, addr, block, bblkid );\n    x86_sub_set_of( mode, tmp0, tmp1, tmp2, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_cf( mode, tmp0, tmp1, tmp2, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_af( mode, tmp0, tmp1, tmp2, addr, block, bblkid, tmp_var_count );\n    \n    // Increment or decrement ESI/EDI according to DF\n    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, 31, 0), ir_cst(inc, 31, 0), addr));\n    \n    block->add_instr(inc, ir_add(si, si, ir_cst(4, si.size-1, 0), addr));\n    block->add_instr(inc, ir_add(di, di, ir_cst(4, di.size-1, 0), addr));\n    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    block->add_instr(dec, ir_sub(si, si, ir_cst(4, si.size-1, 0), addr));\n    block->add_instr(dec, 
ir_sub(di, di, ir_cst(4, di.size-1, 0), addr));\n    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    /* Add prefix if any */\n    _x86_end_prefix(mode, instr, addr, block, prefix_start, end, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    bblkid = end;\n    return;\n}\n\ninline void x86_cmpsq_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, tmp0, tmp1, tmp2, si, di, df;\n    IRBasicBlockId inc, dec, end, prefix_start;\n    \n    if( mode == CPUMode::X86 ){\n        throw runtime_exception(\"CMPSQ: instruction is invalid in X86 mode\");\n    }\n    \n    /* Get operands */\n    si = ir_var(X64_RSI, 63, 0);\n    di = ir_var(X64_RDI, 63, 0);\n    df = ir_var(X64_DF, 63, 0);\n    \n    prefix_start = _x86_init_prefix(mode, instr, addr, block, bblkid);\n    \n    inc = block->new_bblock();\n    dec = block->new_bblock();\n    end = block->new_bblock();\n    \n    /* Read words from memory and compare them */\n    tmp0 = ir_tmp(tmp_var_count++, 63, 0);\n    tmp1 = ir_tmp(tmp_var_count++, 63, 0);\n    tmp2 = ir_tmp(tmp_var_count++, 63, 0);\n    \n    block->add_instr(bblkid, ir_ldm(tmp0, si, addr));\n    block->add_instr(bblkid, ir_ldm(tmp1, di, addr));\n    block->add_instr(bblkid, ir_sub(tmp2, tmp0, tmp1, addr));\n    \n    // Update flags\n    x86_set_pf( mode, tmp2, addr, block, bblkid, tmp_var_count );\n    x86_set_zf( mode, tmp2, addr, block, bblkid );\n    x86_set_sf( mode, tmp2, addr, block, bblkid );\n    x86_sub_set_of( mode, tmp0, tmp1, tmp2, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_cf( mode, tmp0, tmp1, tmp2, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_af( mode, tmp0, tmp1, tmp2, addr, block, bblkid, tmp_var_count );\n    \n    // Increment or decrement ESI/EDI according to DF\n    
block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, 31, 0), ir_cst(inc, 31, 0), addr));\n    \n    block->add_instr(inc, ir_add(si, si, ir_cst(8, si.size-1, 0), addr));\n    block->add_instr(inc, ir_add(di, di, ir_cst(8, di.size-1, 0), addr));\n    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    block->add_instr(dec, ir_sub(si, si, ir_cst(8, si.size-1, 0), addr));\n    block->add_instr(dec, ir_sub(di, di, ir_cst(8, di.size-1, 0), addr));\n    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    /* Add prefix if any */\n    _x86_end_prefix(mode, instr, addr, block, prefix_start, end, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    bblkid = end;\n    return;\n}\n\ninline void x86_cmpsw_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, tmp0, tmp1, tmp2, si, di, df;\n    IRBasicBlockId inc, dec, end, prefix_start;\n    \n    /* Get operands */\n    si = (mode == CPUMode::X86) ? ir_var(X86_ESI, 31, 0) : ir_var(X64_RSI, 63, 0);\n    di = (mode == CPUMode::X86) ? ir_var(X86_EDI, 31, 0) : ir_var(X64_RDI, 63, 0);\n    df = (mode == CPUMode::X86) ? 
ir_var(X86_DF, 31, 0) : ir_var(X64_DF, 63, 0);\n    \n    prefix_start = _x86_init_prefix(mode, instr, addr, block, bblkid);\n    \n    inc = block->new_bblock();\n    dec = block->new_bblock();\n    end = block->new_bblock();\n    \n    /* Read words from memory and compare them */\n    tmp0 = ir_tmp(tmp_var_count++, 15, 0);\n    tmp1 = ir_tmp(tmp_var_count++, 15, 0);\n    tmp2 = ir_tmp(tmp_var_count++, 15, 0);\n    \n    block->add_instr(bblkid, ir_ldm(tmp0, si, addr));\n    block->add_instr(bblkid, ir_ldm(tmp1, di, addr));\n    block->add_instr(bblkid, ir_sub(tmp2, tmp0, tmp1, addr));\n    \n    // Update flags\n    x86_set_pf( mode, tmp2, addr, block, bblkid, tmp_var_count );\n    x86_set_zf( mode, tmp2, addr, block, bblkid );\n    x86_set_sf( mode, tmp2, addr, block, bblkid );\n    x86_sub_set_of( mode, tmp0, tmp1, tmp2, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_cf( mode, tmp0, tmp1, tmp2, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_af( mode, tmp0, tmp1, tmp2, addr, block, bblkid, tmp_var_count );\n    \n    // Increment or decrement ESI/EDI according to DF\n    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, 31, 0), ir_cst(inc, 31, 0), addr));\n    \n    block->add_instr(inc, ir_add(si, si, ir_cst(2, si.size-1, 0), addr));\n    block->add_instr(inc, ir_add(di, di, ir_cst(2, di.size-1, 0), addr));\n    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    block->add_instr(dec, ir_sub(si, si, ir_cst(2, si.size-1, 0), addr));\n    block->add_instr(dec, ir_sub(di, di, ir_cst(2, di.size-1, 0), addr));\n    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    /* Add prefix if any */\n    _x86_end_prefix(mode, instr, addr, block, prefix_start, end, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    bblkid = end;\n    return;\n}\n\ninline void 
x86_cmpxchg_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, dest, op0, op1, ax, zf, tmp;\n    IRBasicBlockId eq, neq, end;\n    \n    eq = block->new_bblock();\n    neq = block->new_bblock();\n    end = block->new_bblock();\n    \n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    ax = (mode == CPUMode::X86) ? ir_var(X86_EAX, op0.size-1, 0) : ir_var(X64_RAX, op0.size-1, 0);\n    zf = (mode == CPUMode::X86) ? ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n   /* Compare op0 and op1 */\n    tmp = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(bblkid, ir_sub(tmp, ax, op0, addr ));\n    /* Set flags */\n    x86_set_pf(mode, tmp, addr, block, bblkid, tmp_var_count);\n    x86_set_sf(mode, tmp, addr, block, bblkid );\n    x86_set_zf(mode, tmp, addr, block, bblkid );\n    x86_sub_set_af(mode, ax, op0, tmp, addr, block, bblkid, tmp_var_count);\n    x86_sub_set_cf(mode, ax, op0, tmp, addr, block, bblkid, tmp_var_count);\n    x86_sub_set_of(mode, ax, op0, tmp, addr, block, bblkid, tmp_var_count);\n    \n    /* Exchange values depending on zf */\n    block->add_instr(bblkid, ir_bcc(zf, ir_cst(eq, 31, 0), ir_cst(neq, 31, 0), addr));\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(eq, ir_stm(op0, op1, addr));\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, eq, tmp_var_count, op0, op1);\n    }\n    block->add_instr(eq, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    
x86_adjust_reg_assign(mode, addr, block, neq, tmp_var_count, ax, op0);\n    block->add_instr(neq, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_cpuid_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid, int& tmp_var_count){\n    IRBasicBlockId  leaf_0 = block->new_bblock(),\n                    end = block->new_bblock();\n    IROperand eax = (mode == CPUMode::X86)? ir_var(X86_EAX, 31, 0) : ir_var(X64_RAX, 63, 0);\n    IROperand ebx = (mode == CPUMode::X86)? ir_var(X86_EBX, 31, 0) : ir_var(X64_RBX, 63, 0);\n    IROperand ecx = (mode == CPUMode::X86)? ir_var(X86_ECX, 31, 0) : ir_var(X64_RCX, 63, 0);\n    IROperand edx = (mode == CPUMode::X86)? ir_var(X86_EDX, 31, 0) : ir_var(X64_RDX, 63, 0);\n\n    /* Test eax to know what cpuid leaf is requested */\n    block->add_instr(bblkid, ir_bcc(eax, ir_cst(end, 31, 0), ir_cst(leaf_0, 31, 0), addr));\n    \n    /* Leaf 0\n     * Return the CPU's manufacturer ID string in ebx, edx and ecx\n     * Set EAX to the higher supported leaf */\n    // Set registers to \"GenuineIntel\"\n    x86_adjust_reg_assign(mode, addr, block, leaf_0, tmp_var_count, ebx, ir_cst(0x756e6547, 31, 0));\n    x86_adjust_reg_assign(mode, addr, block, leaf_0, tmp_var_count, edx, ir_cst(0x49656e69, 31, 0));\n    x86_adjust_reg_assign(mode, addr, block, leaf_0, tmp_var_count, ecx, ir_cst(0x6c65746e, 31, 0));\n    // Set eax to 0 because other leafs are not supported yet\n    x86_adjust_reg_assign(mode, addr, block, leaf_0, tmp_var_count, eax, ir_cst(0, 31, 0));\n    block->add_instr(leaf_0, ir_bcc(ir_cst(1, 31, 0), ir_cst(end, 31, 0), ir_none(), addr));\n\n    bblkid = end;\n    return;\n}\n\ninline void x86_cqo_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand tmp0, reg_a, reg_d, pc, cst0, cst1;\n    if( mode == CPUMode::X86 ){\n        throw runtime_exception(\"CQO: invalid 
instruction in X86 mode\");\n    }\n    IRBasicBlockId ext0 = block->new_bblock(), \n                 ext1 = block->new_bblock(),\n                 end = block->new_bblock();\n    reg_a = ir_var(X64_RAX, 63, 0);\n    reg_d = ir_var(X64_RDX, 63, 0);\n    cst1 = ir_cst(0xffffffffffffffff, 63, 0);\n    cst0 = ir_cst(0, 63, 0);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    /* edx <- replicate(eax[63]) */\n    block->add_instr(bblkid, ir_bcc(x86_arg_extract(reg_a, 63, 63), ir_cst(ext1, 31, 0), ir_cst(ext0, 31, 0), addr));\n    // extend 1\n    x86_adjust_reg_assign(mode, addr, block, ext1, tmp_var_count, reg_d, cst1);\n    block->add_instr(ext1, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // extend 0\n    x86_adjust_reg_assign(mode, addr, block, ext0, tmp_var_count, reg_d, cst0);\n    block->add_instr(ext0, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\n\ninline void x86_cwd_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand tmp0, reg_a, reg_d, pc;\n    IRBasicBlockId ext0 = block->new_bblock(), \n                 ext1 = block->new_bblock(),\n                 end = block->new_bblock();\n    reg_a = (mode==CPUMode::X86)? ir_var(X86_EAX, 31, 0) : ir_var(X64_RAX, 63, 0);\n    reg_d = (mode==CPUMode::X86)? 
ir_var(X86_EDX, 31, 0) : ir_var(X64_RDX, 63, 0);\n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    /* dx <- replicate(ax[15])   */\n    block->add_instr(bblkid, ir_bcc(x86_arg_extract(reg_a, 15, 15), ir_cst(ext1, 31, 0), ir_cst(ext0, 31, 0), addr));\n    // extend 1\n    block->add_instr(ext1, ir_mov(x86_arg_extract(reg_d, 15, 0), ir_cst(0xffff, 15, 0), addr));\n    block->add_instr(ext1, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // extend 0\n    block->add_instr(ext0, ir_mov(x86_arg_extract(reg_d, 15, 0), ir_cst(0x0, 15, 0), addr));\n    block->add_instr(ext0, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_cwde_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand tmp0, reg, pc;\n    IRBasicBlockId ext0 = block->new_bblock(), \n                 ext1 = block->new_bblock(),\n                 end = block->new_bblock();\n    reg = (mode==CPUMode::X86)? 
ir_var(X86_EAX, 31, 0) : ir_var(X64_RAX, 63, 0);\n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    /* eax <- sign_extend(ax)   */\n    block->add_instr(bblkid, ir_bcc(x86_arg_extract(reg, 15, 15), ir_cst(ext1, 31, 0), ir_cst(ext0, 31, 0), addr));\n    // extend 1\n    if( mode == CPUMode::X64 ){ // zero higher bits\n        block->add_instr(ext1, ir_mov(x86_arg_extract(reg, 63, 32), ir_cst(0, 31, 0), addr));\n    }\n    block->add_instr(ext1, ir_mov(x86_arg_extract(reg, 31, 16), ir_cst(0xffff, 15, 0), addr));\n    block->add_instr(ext1, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // extend 0\n    if( mode == CPUMode::X64 ){ // zero higher bits\n        block->add_instr(ext0, ir_mov(x86_arg_extract(reg, 63, 32), ir_cst(0, 31, 0), addr));\n    }\n    block->add_instr(ext0, ir_mov(x86_arg_extract(reg, 31, 16), ir_cst(0x0, 15, 0), addr));\n    block->add_instr(ext0, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_dec_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, dest, op0, tmp;\n    \n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    \n    /* Decrement op0 */\n    tmp = ir_tmp(tmp_var_count++, op0.size-1, 0); \n    block->add_instr(bblkid, ir_sub(tmp, op0, ir_cst(1, op0.size-1, 0), addr ));\n    \n    /* Set flags (except CF) */\n    x86_set_pf(mode, tmp, addr, block, bblkid, tmp_var_count);\n    x86_set_sf(mode, tmp, addr, block, bblkid );\n    x86_set_zf(mode, tmp, addr, block, bblkid );\n    x86_sub_set_af(mode, op0, ir_cst(1, op0.size-1, 0), tmp, addr, block, bblkid, tmp_var_count);\n    
x86_sub_set_of(mode, op0, ir_cst(1, op0.size-1, 0), tmp, addr, block, bblkid, tmp_var_count);\n    \n    /* Store result */\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(bblkid, ir_stm(dest, tmp, addr));\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, tmp);\n    }\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\n\ninline void x86_div_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, op0, dividend, remainder, tmp, ax, dx, tmp_dividend, tmp_remainder;\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    ax = (mode == CPUMode::X86)? ir_var(X86_EAX, 31, 0) : ir_var(X64_RAX, 63, 0);\n    dx = (mode == CPUMode::X86)? ir_var(X86_EDX, 31, 0) : ir_var(X64_RDX, 63, 0);\n    tmp = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    tmp_dividend = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    tmp_remainder = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    if( op0.size == 8 ){\n        dividend = x86_arg_extract(ax, 7, 0);\n        remainder = x86_arg_extract(ax, 15, 8);\n    }else{\n        dividend = x86_arg_extract(ax, op0.size-1, 0);\n        remainder = x86_arg_extract(dx, op0.size-1, 0);\n    }\n    \n    /* Do the div */\n    block->add_instr(bblkid, ir_mov(tmp, x86_arg_extract(ax, op0.size-1, 0), addr));\n    block->add_instr(bblkid, ir_div(tmp_dividend, tmp , op0, addr ));\n    block->add_instr(bblkid, ir_mod(tmp_remainder, tmp , op0, addr ));\n    /* Assign results to registers */\n    x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dividend, tmp_dividend);\n    x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, remainder, tmp_remainder);\n    \n    // Update PC\n    pc = 
x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_idiv_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, op0, ax, dx, tmp, dividend, remainder, tmp_dividend, tmp_remainder;\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    ax = (mode == CPUMode::X86)? ir_var(X86_EAX, 31, 0) : ir_var(X64_RAX, 63, 0);\n    dx = (mode == CPUMode::X86)? ir_var(X86_EDX, 31, 0) : ir_var(X64_RDX, 63, 0);\n    tmp = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    tmp_dividend = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    tmp_remainder = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    \n    if( op0.size == 8 ){\n        dividend = x86_arg_extract(ax, 7, 0);\n        remainder = x86_arg_extract(ax, 15, 8);\n    }else{\n        dividend = x86_arg_extract(ax, op0.size-1, 0);\n        remainder = x86_arg_extract(dx, op0.size-1, 0);\n    }\n\n    /* Quotient in *ax, remainder in *dx */\n    block->add_instr(bblkid, ir_mov(tmp, x86_arg_extract(ax, op0.size-1, 0), addr));\n    block->add_instr(bblkid, ir_sdiv(tmp_dividend, tmp , op0, addr ));\n    block->add_instr(bblkid, ir_smod(tmp_remainder, tmp , op0, addr ));\n    /* Assign results to registers */\n    x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dividend, tmp_dividend);\n    x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, remainder, tmp_remainder);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_imul_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, op0, op1, op2, lower, higher, tmp0, tmp1, ax, tmp2, tmp3, tmp4, cf, of;\n    \n    cf = (mode 
== CPUMode::X86)? ir_var(X86_CF, 31, 0): ir_var(X64_CF, 63, 0);\n    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0): ir_var(X64_OF, 63, 0);\n    \n    /* One-operand form */\n    if( instr->detail->x86.op_count == 1 ){\n        /* Get operands */\n        op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n        ax = (mode == CPUMode::X86)? ir_var(X86_EAX, op0.size-1, 0): ir_var(X64_RAX, op0.size-1, 0);\n        if( op0.size == 8 ){\n            lower = (mode == CPUMode::X86)? ir_var(X86_EAX, 7, 0): ir_var(X64_RAX, 7, 0);\n            higher = (mode == CPUMode::X86)? ir_var(X86_EAX, 15, 8): ir_var(X64_RAX, 15, 8);\n        }else{\n            lower = (mode == CPUMode::X86)? ir_var(X86_EAX, op0.size-1, 0): ir_var(X64_RAX, op0.size-1, 0);\n            higher = (mode == CPUMode::X86)? ir_var(X86_EDX, op0.size-1, 0): ir_var(X64_RDX, op0.size-1, 0);\n        }\n        tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n        tmp1 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n        tmp2 = ir_tmp(tmp_var_count++, 0, 0);\n        tmp3 = ir_tmp(tmp_var_count++, 0, 0);\n        \n        /* Do the multiplication */\n        block->add_instr(bblkid, ir_smull(tmp0, ax, op0, addr));\n        block->add_instr(bblkid, ir_smulh(tmp1, ax, op0, addr));\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, lower, tmp0);\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, higher, tmp1);\n        \n        /* Set OF and CF iff the higher:lower != signextend(lower) \n         * SO we do \n         *      higher==0 && lower[n-1] == 0 \n         *  OR  higher==0xfff.... 
&& lower[n-1] == 1 */\n        block->add_instr(bblkid, ir_bisz(tmp2, tmp1, ir_cst(1, 0, 0), addr));\n        block->add_instr(bblkid, ir_not(tmp3, x86_arg_extract(tmp0, tmp0.size-1, tmp0.size-1), addr));\n        block->add_instr(bblkid, ir_and(tmp2, tmp2, tmp3, addr));\n        block->add_instr(bblkid, ir_not(tmp1, tmp1, addr));\n        block->add_instr(bblkid, ir_bisz(tmp3, tmp1, ir_cst(1, 0, 0), addr));\n        block->add_instr(bblkid, ir_and(tmp3, tmp3, x86_arg_extract(tmp0, tmp0.size-1, tmp0.size-1),  addr));\n        block->add_instr(bblkid, ir_or(tmp3, tmp3, tmp2, addr));\n        \n        block->add_instr(bblkid, ir_bisz(cf, tmp3, ir_cst(1, 0, 0), addr));\n        block->add_instr(bblkid, ir_bisz(of, tmp3, ir_cst(1, 0, 0), addr));\n    \n    /* Two-operands form */\n    }else if( instr->detail->x86.op_count == 2){\n        /* Get operands */\n        op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n        op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n        tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n        tmp1 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n        tmp2 = ir_tmp(tmp_var_count++, 0, 0);\n        tmp3 = ir_tmp(tmp_var_count++, 0, 0);\n        \n        /* Do the multiplication */\n        block->add_instr(bblkid, ir_smull(tmp0, op0, op1, addr));\n        block->add_instr(bblkid, ir_smulh(tmp1, op0, op1, addr));\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, op0, tmp0);\n        \n        /* Set OF and CF iff the higher:lower != signextend(lower) \n         * SO we do \n         *      higher==0 && lower[n-1] == 0 \n         *  OR  higher==0xfff.... 
&& lower[n-1] == 1 */\n        block->add_instr(bblkid, ir_bisz(tmp2, tmp1, ir_cst(1, 0, 0), addr));\n        block->add_instr(bblkid, ir_not(tmp3, x86_arg_extract(tmp0, tmp0.size-1, tmp0.size-1), addr));\n        block->add_instr(bblkid, ir_and(tmp2, tmp2, tmp3, addr));\n        block->add_instr(bblkid, ir_not(tmp1, tmp1, addr));\n        block->add_instr(bblkid, ir_bisz(tmp3, tmp1, ir_cst(1, 0, 0), addr));\n        block->add_instr(bblkid, ir_and(tmp3, tmp3, x86_arg_extract(tmp0, tmp0.size-1, tmp0.size-1),  addr));\n        block->add_instr(bblkid, ir_or(tmp3, tmp3, tmp2, addr));\n        \n        block->add_instr(bblkid, ir_bisz(cf, tmp3, ir_cst(1, 0, 0), addr));\n        block->add_instr(bblkid, ir_bisz(of, tmp3, ir_cst(1, 0, 0), addr));\n         \n        \n    /* Three-operands form */\n    }else{\n        /* Get operands */\n        op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n        op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n        op2 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[2]), block, bblkid, tmp_var_count, true);\n        if( op2.size == 8 )\n            op2 = ir_cst(op2.cst(), op1.size-1, 0); // Already sign extended in IROperand() constructor\n        tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n        tmp1 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n        tmp2 = ir_tmp(tmp_var_count++, 0, 0);\n        tmp3 = ir_tmp(tmp_var_count++, 0, 0);\n        \n        /* Do the multiplication */\n        block->add_instr(bblkid, ir_smull(tmp0, op1, op2, addr));\n        block->add_instr(bblkid, ir_smulh(tmp1, op1, op2, addr));\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, op0, tmp0);\n        \n        /* Set OF and CF iff the higher:lower != signextend(lower) \n         * SO we do \n         *      higher==0 && lower[n-1] == 0 \n         *  OR  higher==0xfff.... 
&& lower[n-1] == 1 */\n        block->add_instr(bblkid, ir_bisz(tmp2, tmp1, ir_cst(1, 0, 0), addr));\n        block->add_instr(bblkid, ir_not(tmp3, x86_arg_extract(tmp0, tmp0.size-1, tmp0.size-1), addr));\n        block->add_instr(bblkid, ir_and(tmp2, tmp2, tmp3, addr));\n        block->add_instr(bblkid, ir_not(tmp1, tmp1, addr));\n        block->add_instr(bblkid, ir_bisz(tmp3, tmp1, ir_cst(1, 0, 0), addr));\n        block->add_instr(bblkid, ir_and(tmp3, tmp3, x86_arg_extract(tmp0, tmp0.size-1, tmp0.size-1),  addr));\n        block->add_instr(bblkid, ir_or(tmp3, tmp3, tmp2, addr));\n        \n        block->add_instr(bblkid, ir_bisz(cf, tmp3, ir_cst(1, 0, 0), addr));\n        block->add_instr(bblkid, ir_bisz(of, tmp3, ir_cst(1, 0, 0), addr));\n        \n    }\n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_inc_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, dest, op0, tmp;\n    \n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    \n    /* Increment op0 */\n    tmp = ir_tmp(tmp_var_count++, op0.size-1, 0); \n    block->add_instr(bblkid, ir_add(tmp, op0, ir_cst(1, op0.size-1, 0), addr ));\n    \n    /* Set flags (except CF) */\n    x86_set_pf(mode, tmp, addr, block, bblkid, tmp_var_count);\n    x86_set_sf(mode, tmp, addr, block, bblkid );\n    x86_set_zf(mode, tmp, addr, block, bblkid );\n    x86_sub_set_af(mode, op0, ir_cst(1, op0.size-1, 0), tmp, addr, block, bblkid, tmp_var_count);\n    x86_sub_set_of(mode, op0, ir_cst(1, op0.size-1, 0), tmp, addr, block, bblkid, tmp_var_count);\n    \n    /* Store result */\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM 
){\n        block->add_instr(bblkid, ir_stm(dest, tmp, addr));\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, tmp);\n    }\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_int_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, num, next_pc;\n    \n    /* Get operands */\n    pc = x86_get_pc(mode);\n    next_pc = ir_tmp(tmp_var_count++, pc.size-1, 0); \n    block->add_instr(bblkid, ir_add(next_pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    num = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    \n    /* Create interrupt */\n    block->add_instr(bblkid, ir_int(num, next_pc, addr));\n    return;\n}\n\ninline void x86_int3_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, num, next_pc;\n    \n    /* Get operands */\n    pc = x86_get_pc(mode);\n    next_pc = ir_tmp(tmp_var_count++, pc.size-1, 0);\n    block->add_instr(bblkid, ir_add(next_pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    /* Create interrupt 3 */\n    block->add_instr(bblkid, ir_int(ir_cst(3, 7, 0), next_pc, addr));\n    return;\n}\n\ninline void x86_leave_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, bp, sp;\n    \n    sp = (mode == CPUMode::X86)? ir_var(X86_ESP, 31, 0) : ir_var(X64_RSP, 63, 0);\n    bp = (mode == CPUMode::X86)? 
ir_var(X86_EBP, 31, 0) : ir_var(X64_RBP, 63, 0);    \n       \n    /* esp <- ebp\n     * ebp <- pop() */ \n    block->add_instr(bblkid, ir_mov(sp, bp, addr ));\n    block->add_instr(bblkid, ir_ldm(bp, sp, addr ));\n    block->add_instr(bblkid, ir_add(sp, sp, ir_cst(bp.size/8, sp.size-1, 0), addr ));\n    \n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_ja_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, tmp0, tmp2, zf, cf, op0;\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    cf = (mode == CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    zf = (mode == CPUMode::X86)? ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    tmp0 = ir_tmp(tmp_var_count++, cf.size-1, 0);\n\n    /* Condition CF = ZF = 0 */ \n    block->add_instr(bblkid, ir_or(tmp0, cf, zf, addr ));\n    \n    /* Two possible values */\n    block->add_instr(bblkid, ir_jcc(tmp0, ir_cst(instr->size+addr, pc.size-1, 0), ir_cst(op0.cst(), pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_jae_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, tmp0, cf, op0;\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    cf = (mode == CPUMode::X86)? 
ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    \n    /* Condition CF = 0 */ \n    block->add_instr(bblkid, ir_jcc(cf, ir_cst(instr->size+addr, pc.size-1, 0), ir_cst(op0.cst(), pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_jb_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, cf, op0;\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    cf = (mode == CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n\n    /* Condition CF = 1 */ \n    block->add_instr(bblkid, ir_jcc(cf, ir_cst(op0.cst(), pc.size-1, 0), ir_cst(instr->size+addr, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_jbe_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, tmp0, zf, cf, op0;\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    cf = (mode == CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    zf = (mode == CPUMode::X86)? 
ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    tmp0 = ir_tmp(tmp_var_count++, cf.size-1, 0);\n    \n    /* Condition CF = 1 or ZF = 1 */ \n    block->add_instr(bblkid, ir_or(tmp0, cf, zf, addr ));\n    \n    /* Two possible values */\n    block->add_instr(bblkid, ir_jcc(tmp0, ir_cst(op0.cst(), pc.size-1, 0), ir_cst(instr->size+addr, pc.size-1, 0), addr ));\n    \n    return;\n}\n\ninline void x86_jcxz_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, cx, op0;\n    if( mode == CPUMode::X64 ){\n        throw runtime_exception(\"JCXZ: invalid in X64 mode\");\n    }\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    cx = (mode == CPUMode::X86)? ir_var(X86_ECX, 15, 0) : ir_var(X64_RCX, 15, 0);\n    \n    /* Condition CX = 0 */ \n    block->add_instr(bblkid, ir_jcc(cx, ir_cst(instr->size+addr, pc.size-1, 0), ir_cst(op0.cst(), pc.size-1, 0), addr ));\n    \n    return;\n}\n\ninline void x86_je_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, zf, op0;\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    zf = (mode == CPUMode::X86)? 
ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n\n    /* Condition ZF = 1 */ \n    block->add_instr(bblkid, ir_jcc(zf, ir_cst(op0.cst(), pc.size-1, 0), ir_cst(instr->size+addr, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_jecxz_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, ecx, op0;\n\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    ecx = (mode == CPUMode::X86)? ir_var(X86_ECX, 31, 0) : ir_var(X64_RCX, 31, 0);\n    \n    /* Condition ECX = 0 */ \n    block->add_instr(bblkid, ir_jcc(ecx, ir_cst(instr->size+addr, pc.size-1, 0), ir_cst(op0.cst(), pc.size-1, 0), addr ));\n    \n    return;\n}\n\ninline void x86_jg_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, tmp0, zf, sf, of, op0;\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    sf = (mode == CPUMode::X86)? ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n    zf = (mode == CPUMode::X86)? ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    of = (mode == CPUMode::X86)? 
ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    tmp0 = ir_tmp(tmp_var_count++, sf.size-1, 0);\n\n    /* Condition ZF = 0 and SF = OF */ \n    block->add_instr(bblkid, ir_xor(tmp0, sf, of, addr ));\n    block->add_instr(bblkid, ir_or(tmp0, tmp0, zf, addr ));\n    \n    /* Two possible values */\n    block->add_instr(bblkid, ir_jcc(tmp0, ir_cst(instr->size+addr, pc.size-1, 0), ir_cst(op0.cst(), pc.size-1, 0), addr ));\n    \n    return;\n}\n\ninline void x86_jge_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, tmp0, sf, of, op0;\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    sf = (mode == CPUMode::X86)? ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    tmp0 = ir_tmp(tmp_var_count++, sf.size-1, 0);\n    \n    /* Condition SF = OF */ \n    block->add_instr(bblkid, ir_xor(tmp0, sf, of, addr ));\n    \n    /* Two possible values */\n    block->add_instr(bblkid, ir_jcc(tmp0, ir_cst(instr->size+addr, pc.size-1, 0), ir_cst(op0.cst(), pc.size-1, 0), addr ));\n    \n    return;\n}\n\ninline void x86_jl_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, tmp0, sf, of, op0;\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    sf = (mode == CPUMode::X86)? ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n    of = (mode == CPUMode::X86)? 
ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    tmp0 = ir_tmp(tmp_var_count++, sf.size-1, 0);\n\n    /* Condition SF != OF */ \n    block->add_instr(bblkid, ir_xor(tmp0, sf, of, addr ));\n    \n    /* Two possible values */\n    block->add_instr(bblkid, ir_jcc(tmp0, ir_cst(op0.cst(), pc.size-1, 0), ir_cst(instr->size+addr, pc.size-1, 0), addr ));\n    \n    return;\n}\n\ninline void x86_jle_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, tmp0, zf, sf, of, op0;\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    sf = (mode == CPUMode::X86)? ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n    zf = (mode == CPUMode::X86)? ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    tmp0 = ir_tmp(tmp_var_count++, sf.size-1, 0);\n\n    /* Condition ZF = 1 or SF != OF */ \n    block->add_instr(bblkid, ir_xor(tmp0, sf, of, addr ));\n    block->add_instr(bblkid, ir_or(tmp0, tmp0, zf, addr ));\n    \n    /* Two possible values */\n    block->add_instr(bblkid, ir_jcc(tmp0, ir_cst(op0.cst(), pc.size-1, 0), ir_cst(instr->size+addr, pc.size-1, 0), addr ));\n    \n    return;\n}\n\ninline void x86_jmp_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, op0;\n\n    // Update PC first in case jmp is PC relative\n    pc = (mode == CPUMode::X86)? 
ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    block->add_instr(bblkid, ir_jcc(ir_cst(1, pc.size-1, 0), op0, ir_none(), addr ));\n\n    return;\n}\n\ninline void x86_jne_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, zf, op0;\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    zf = (mode == CPUMode::X86)? ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n\n    /* Condition ZF = 0 */ \n    block->add_instr(bblkid, ir_jcc(zf, ir_cst(instr->size+addr, pc.size-1, 0), ir_cst(op0.cst(), pc.size-1, 0), addr ));\n    \n    return;\n}\n\ninline void x86_jno_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, of, op0;\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    \n    /* Condition OF = 0 */ \n    block->add_instr(bblkid, ir_jcc(of, ir_cst(instr->size+addr, pc.size-1, 0), ir_cst(op0.cst(), pc.size-1, 0), addr ));\n    \n    return;\n}\n\ninline void x86_jnp_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, pf, op0;\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    pf = (mode == CPUMode::X86)? 
ir_var(X86_PF, 31, 0) : ir_var(X64_PF, 63, 0);\n    \n    /* Condition PF = 0 */ \n    block->add_instr(bblkid, ir_jcc(pf, ir_cst(instr->size+addr, pc.size-1, 0), ir_cst(op0.cst(), pc.size-1, 0), addr ));\n    \n    return;\n}\n\ninline void x86_jns_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, sf, op0;\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    sf = (mode == CPUMode::X86)? ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n\n    /* Condition SF = 0 */ \n    block->add_instr(bblkid, ir_jcc(sf, ir_cst(instr->size+addr, pc.size-1, 0), ir_cst(op0.cst(), pc.size-1, 0), addr ));\n    \n    return;\n}\n\ninline void x86_jo_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, of, op0;\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    \n    /* Condition OF = 1 */ \n    block->add_instr(bblkid, ir_jcc(of, ir_cst(op0.cst(), pc.size-1, 0), ir_cst(instr->size+addr, pc.size-1, 0), addr ));\n    \n    return;\n}\n\ninline void x86_jp_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, pf, op0;\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    pf = (mode == CPUMode::X86)? 
ir_var(X86_PF, 31, 0) : ir_var(X64_PF, 63, 0);\n    \n    /* Condition PF = 1 */ \n    block->add_instr(bblkid, ir_jcc(pf, ir_cst(op0.cst(), pc.size-1, 0), ir_cst(instr->size+addr, pc.size-1, 0), addr ));\n    \n    return;\n}\n\n\ninline void x86_jrcxz_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, rcx, op0;\n    \n    if( mode == CPUMode::X86 ){\n        throw runtime_exception(\"JRCXZ: invalid in X86 mode\");\n    }\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = ir_var(X64_RIP, 63, 0);\n    rcx = ir_var(X64_RCX, 63, 0);\n    \n    /* Condition RCX = 0 */ \n    block->add_instr(bblkid, ir_jcc(rcx, ir_cst(instr->size+addr, pc.size-1, 0), ir_cst(op0.cst(), pc.size-1, 0), addr ));\n    \n    return;\n}\n\n\ninline void x86_js_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, sf, op0;\n    \n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    sf = (mode == CPUMode::X86)? ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n\n    /* Condition SF = 1 */ \n    block->add_instr(bblkid, ir_jcc(sf, ir_cst(op0.cst(), pc.size-1, 0), ir_cst(instr->size+addr, pc.size-1, 0), addr ));\n    \n    return;\n}\n\n\ninline void x86_lahf_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, sf, zf, af, pf, cf, ax;\n\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    sf = (mode == CPUMode::X86)? ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n    zf = (mode == CPUMode::X86)? ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    af = (mode == CPUMode::X86)? 
ir_var(X86_AF, 31, 0) : ir_var(X64_AF, 63, 0);\n    pf = (mode == CPUMode::X86)? ir_var(X86_PF, 31, 0) : ir_var(X64_PF, 63, 0);\n    cf = (mode == CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    ax = (mode == CPUMode::X86)? ir_var(X86_EAX, 31, 0) : ir_var(X64_RAX, 63, 0);\n    \n    /* AH <- EFLAGS(SF:ZF:0:AF:0:PF:1:CF) */ \n    block->add_instr(bblkid, ir_mov(x86_arg_extract(ax, 15, 15), x86_arg_extract(sf, 0, 0), addr ));\n    block->add_instr(bblkid, ir_mov(x86_arg_extract(ax, 14, 14), x86_arg_extract(zf, 0, 0), addr ));\n    block->add_instr(bblkid, ir_mov(x86_arg_extract(ax, 13, 13), ir_cst(0, 0, 0), addr ));\n    block->add_instr(bblkid, ir_mov(x86_arg_extract(ax, 12, 12), x86_arg_extract(af, 0, 0), addr ));\n    block->add_instr(bblkid, ir_mov(x86_arg_extract(ax, 11, 11), ir_cst(0, 0, 0), addr ));\n    block->add_instr(bblkid, ir_mov(x86_arg_extract(ax, 10, 10), x86_arg_extract(pf, 0, 0), addr ));\n    block->add_instr(bblkid, ir_mov(x86_arg_extract(ax, 9, 9), ir_cst(1, 0, 0), addr ));\n    block->add_instr(bblkid, ir_mov(x86_arg_extract(ax, 8, 8), x86_arg_extract(cf, 0, 0), addr ));\n    \n    // Update PC\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_lea_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, tmp0, op0, op1;\n    pc = (mode == CPUMode::X86)? 
ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    \n    // Update PC first in case PC relative load!\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count);\n\n    /* Check operand sizes */\n    if( op0.size > op1.size ){\n        /* Zero extend */\n        tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n        block->add_instr(bblkid, ir_mov(tmp0, ir_cst(0, op0.size-1, 0), addr ));\n        block->add_instr(bblkid, ir_mov(x86_arg_extract(tmp0, op1.size-1, 0), op1, addr));\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, op0, tmp0);\n    }else{\n        /* Truncate if needed */\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, op0, x86_arg_extract(op1, op0.size-1, 0));\n    }\n}\n\ninline void x86_lodsb_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, al, si, df;\n    IRBasicBlockId inc, dec, end, prefix_start;\n    \n    df = (mode == CPUMode::X86)? ir_var(X86_DF, 31, 0) : ir_var(X64_DF, 63, 0);\n    al = (mode == CPUMode::X86)? ir_var(X86_EAX, 7, 0): ir_var(X64_RAX, 7, 0);\n    si = (mode == CPUMode::X86)? ir_var(X86_ESI, 31, 0): ir_var(X64_RSI, 63, 0);\n    pc = (mode == CPUMode::X86)? 
ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n\n    prefix_start = _x86_init_prefix(mode, instr, addr, block, bblkid);\n    \n    /*  Load byte */\n    block->add_instr(bblkid, ir_ldm(al, si, addr));\n    inc = block->new_bblock();\n    dec = block->new_bblock();\n    end = block->new_bblock();\n    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, 31, 0), ir_cst(inc, 31, 0), addr));\n    /* Increment */ \n    block->add_instr(inc, ir_add(si, si, ir_cst(1, si.size-1, 0), addr));\n    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    /* Or decrement */\n    block->add_instr(dec, ir_sub(si, si, ir_cst(1, si.size-1, 0), addr));\n    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    /* Add prefix if any */\n    _x86_end_prefix(mode, instr, addr, block, prefix_start, end, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    bblkid = end;\n    return;\n}\n\ninline void x86_lodsd_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, eax, si, df, tmp;\n    IRBasicBlockId inc, dec, end, prefix_start;\n    \n    df = (mode == CPUMode::X86)? ir_var(X86_DF, 31, 0) : ir_var(X64_DF, 63, 0);\n    eax = (mode == CPUMode::X86)? ir_var(X86_EAX, 31, 0): ir_var(X64_RAX, 31, 0);\n    si = (mode == CPUMode::X86)? ir_var(X86_ESI, 31, 0): ir_var(X64_RSI, 63, 0);\n    pc = (mode == CPUMode::X86)? 
ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n    tmp = ir_tmp(tmp_var_count++, eax.size-1, 0);\n\n    prefix_start = _x86_init_prefix(mode, instr, addr, block, bblkid);\n    \n    /*  Load byte */\n    block->add_instr(bblkid, ir_ldm(tmp, si, addr));\n    x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, eax, tmp);\n    inc = block->new_bblock();\n    dec = block->new_bblock();\n    end = block->new_bblock();\n    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, df.size-1, 0), ir_cst(inc, df.size-1, 0), addr));\n    /* Increment */ \n    block->add_instr(inc, ir_add(si, si, ir_cst(4, si.size-1, 0), addr));\n    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    /* Or decrement */\n    block->add_instr(dec, ir_sub(si, si, ir_cst(4, si.size-1, 0), addr));\n    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    /* Add prefix if any */\n    _x86_end_prefix(mode, instr, addr, block, prefix_start, end, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    bblkid = end;\n    return;\n}\n\ninline void x86_lodsq_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, rax, si, df;\n    IRBasicBlockId inc, dec, end, prefix_start;\n    \n    df = ir_var(X64_DF, 63, 0);\n    rax = ir_var(X64_RAX, 63, 0);\n    si = ir_var(X64_RSI, 63, 0);\n    pc = ir_var(X64_RIP, 63, 0);\n\n    prefix_start = _x86_init_prefix(mode, instr, addr, block, bblkid);\n    \n    /*  Load byte */\n    block->add_instr(bblkid, ir_ldm(rax, si, addr));\n    inc = block->new_bblock();\n    dec = block->new_bblock();\n    end = block->new_bblock();\n    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, df.size-1, 0), ir_cst(inc, df.size-1, 0), addr));\n    /* Increment */ \n    block->add_instr(inc, ir_add(si, si, ir_cst(8, si.size-1, 
0), addr));\n    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    /* Or decrement */\n    block->add_instr(dec, ir_sub(si, si, ir_cst(8, si.size-1, 0), addr));\n    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    /* Add prefix if any */\n    _x86_end_prefix(mode, instr, addr, block, prefix_start, end, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    bblkid = end;\n    return;\n}\n\ninline void x86_lodsw_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, ax, si, df;\n    IRBasicBlockId inc, dec, end, prefix_start;\n    \n    df = (mode == CPUMode::X86)? ir_var(X86_DF, 31, 0) : ir_var(X64_DF, 63, 0);\n    ax = (mode == CPUMode::X86)? ir_var(X86_EAX, 15, 0): ir_var(X64_RAX, 15, 0);\n    si = (mode == CPUMode::X86)? ir_var(X86_ESI, 31, 0): ir_var(X64_RSI, 63, 0);\n    pc = (mode == CPUMode::X86)? 
ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n\n    prefix_start = _x86_init_prefix(mode, instr, addr, block, bblkid);\n    \n    /*  Load byte */\n    block->add_instr(bblkid, ir_ldm(ax, si, addr));\n    inc = block->new_bblock();\n    dec = block->new_bblock();\n    end = block->new_bblock();\n    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, df.size-1, 0), ir_cst(inc, df.size-1, 0), addr));\n    /* Increment */ \n    block->add_instr(inc, ir_add(si, si, ir_cst(2, si.size-1, 0), addr));\n    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    /* Or decrement */\n    block->add_instr(dec, ir_sub(si, si, ir_cst(2, si.size-1, 0), addr));\n    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    /* Add prefix if any */\n    _x86_end_prefix(mode, instr, addr, block, prefix_start, end, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    bblkid = end;\n    return;\n}\n\ninline void x86_mov_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, op0, op1;\n\n    // Update PC (in case PC-relative addressing)\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n\n    /*  Do the mov */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(bblkid, ir_stm(op0, op1, addr));\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, op0, op1);\n    }\n    \n    return;\n}\n\ninline void x86_movsb_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid 
, int& tmp_var_count){\n    IROperand pc, di, si, df, tmp0;\n    IRBasicBlockId inc, dec, end, prefix_start;\n    \n    df = (mode == CPUMode::X86)? ir_var(X86_DF, 31, 0) : ir_var(X64_DF, 63, 0);\n    di = (mode == CPUMode::X86)? ir_var(X86_EDI, 31, 0): ir_var(X64_RDI, 63, 0);\n    si = (mode == CPUMode::X86)? ir_var(X86_ESI, 31, 0): ir_var(X64_RSI, 63, 0);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n\n    prefix_start = _x86_init_prefix(mode, instr, addr, block, bblkid);\n\n    /*  Load byte */\n    tmp0 = ir_tmp(tmp_var_count++, 7, 0);\n    block->add_instr(bblkid, ir_ldm(tmp0, si, addr));\n    block->add_instr(bblkid, ir_stm(di, tmp0, addr));\n    inc = block->new_bblock();\n    dec = block->new_bblock();\n    end = block->new_bblock();\n    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, df.size-1, 0), ir_cst(inc, df.size-1, 0), addr));\n    /* Increment if DF = 0 */ \n    block->add_instr(inc, ir_add(si, si, ir_cst(1, si.size-1, 0), addr));\n    block->add_instr(inc, ir_add(di, di, ir_cst(1, di.size-1, 0), addr));\n    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    /* Or decrement if DF = 1*/\n    block->add_instr(dec, ir_sub(si, si, ir_cst(1, si.size-1, 0), addr));\n    block->add_instr(dec, ir_sub(di, di, ir_cst(1, di.size-1, 0), addr));\n    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    /* Add prefix if any */\n    _x86_end_prefix(mode, instr, addr, block, prefix_start, end, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    bblkid = end;\n    return;\n}\n\ninline void x86_movsd_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, di, si, df, tmp0;\n    IRBasicBlockId inc, dec, end, prefix_start;\n    \n    df = (mode == CPUMode::X86)? 
ir_var(X86_DF, 31, 0) : ir_var(X64_DF, 63, 0);
    di = (mode == CPUMode::X86)? ir_var(X86_EDI, 31, 0): ir_var(X64_RDI, 63, 0);
    si = (mode == CPUMode::X86)? ir_var(X86_ESI, 31, 0): ir_var(X64_RSI, 63, 0);
    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);

    prefix_start = _x86_init_prefix(mode, instr, addr, block, bblkid);

    /*  Load dword */
    tmp0 = ir_tmp(tmp_var_count++, 31, 0);
    block->add_instr(bblkid, ir_ldm(tmp0, si, addr));
    block->add_instr(bblkid, ir_stm(di, tmp0, addr));
    inc = block->new_bblock();
    dec = block->new_bblock();
    end = block->new_bblock();
    // DF picks increment (DF=0) or decrement (DF=1) of both pointers
    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, df.size-1, 0), ir_cst(inc, df.size-1, 0), addr));
    /* Increment if DF = 0 */ 
    block->add_instr(inc, ir_add(si, si, ir_cst(4, si.size-1, 0), addr));
    block->add_instr(inc, ir_add(di, di, ir_cst(4, di.size-1, 0), addr));
    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));
    /* Or decrement if DF = 1*/
    block->add_instr(dec, ir_sub(si, si, ir_cst(4, si.size-1, 0), addr));
    block->add_instr(dec, ir_sub(di, di, ir_cst(4, di.size-1, 0), addr));
    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));
    
    /* Add prefix if any */
    _x86_end_prefix(mode, instr, addr, block, prefix_start, end, tmp_var_count);
    
    // Update PC
    pc = x86_get_pc(mode);
    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));
    bblkid = end;
    return;
}

/* MOVSQ: same scheme as MOVSB but moves an 8-byte qword and steps the
 * pointers by 8. */
inline void x86_movsq_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand pc, di, si, df, tmp0;
    IRBasicBlockId inc, dec, end, prefix_start;
    
    df = (mode == CPUMode::X86)? ir_var(X86_DF, 31, 0) : ir_var(X64_DF, 63, 0);
    di = (mode == CPUMode::X86)? 
ir_var(X86_EDI, 31, 0): ir_var(X64_RDI, 63, 0);
    si = (mode == CPUMode::X86)? ir_var(X86_ESI, 31, 0): ir_var(X64_RSI, 63, 0);
    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);

    prefix_start = _x86_init_prefix(mode, instr, addr, block, bblkid);

    /*  Load qword */
    tmp0 = ir_tmp(tmp_var_count++, 63, 0);
    block->add_instr(bblkid, ir_ldm(tmp0, si, addr));
    block->add_instr(bblkid, ir_stm(di, tmp0, addr));
    inc = block->new_bblock();
    dec = block->new_bblock();
    end = block->new_bblock();
    // DF picks increment (DF=0) or decrement (DF=1) of both pointers
    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, df.size-1, 0), ir_cst(inc, df.size-1, 0), addr));
    /* Increment if DF = 0 */ 
    block->add_instr(inc, ir_add(si, si, ir_cst(8, si.size-1, 0), addr));
    block->add_instr(inc, ir_add(di, di, ir_cst(8, di.size-1, 0), addr));
    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));
    /* Or decrement if DF = 1*/
    block->add_instr(dec, ir_sub(si, si, ir_cst(8, si.size-1, 0), addr));
    block->add_instr(dec, ir_sub(di, di, ir_cst(8, di.size-1, 0), addr));
    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));
    
    /* Add prefix if any */
    _x86_end_prefix(mode, instr, addr, block, prefix_start, end, tmp_var_count);
    
    // Update PC
    pc = x86_get_pc(mode);
    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));
    bblkid = end;
    return;
}

/* MOVSW: same scheme as MOVSB but moves a 2-byte word and steps the
 * pointers by 2. */
inline void x86_movsw_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand pc, di, si, df, tmp0;
    IRBasicBlockId inc, dec, end, prefix_start;
    
    df = (mode == CPUMode::X86)? ir_var(X86_DF, 31, 0) : ir_var(X64_DF, 63, 0);
    di = (mode == CPUMode::X86)? ir_var(X86_EDI, 31, 0): ir_var(X64_RDI, 63, 0);
    si = (mode == CPUMode::X86)? 
ir_var(X86_ESI, 31, 0): ir_var(X64_RSI, 63, 0);
    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);

    prefix_start = _x86_init_prefix(mode, instr, addr, block, bblkid);

    /*  Load word */
    tmp0 = ir_tmp(tmp_var_count++, 15, 0);
    block->add_instr(bblkid, ir_ldm(tmp0, si, addr));
    block->add_instr(bblkid, ir_stm(di, tmp0, addr));
    inc = block->new_bblock();
    dec = block->new_bblock();
    end = block->new_bblock();
    // DF picks increment (DF=0) or decrement (DF=1) of both pointers
    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, df.size-1, 0), ir_cst(inc, df.size-1, 0), addr));
    /* Increment if DF = 0 */ 
    block->add_instr(inc, ir_add(si, si, ir_cst(2, si.size-1, 0), addr));
    block->add_instr(inc, ir_add(di, di, ir_cst(2, di.size-1, 0), addr));
    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));
    /* Or decrement if DF = 1*/
    block->add_instr(dec, ir_sub(si, si, ir_cst(2, si.size-1, 0), addr));
    block->add_instr(dec, ir_sub(di, di, ir_cst(2, di.size-1, 0), addr));
    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));
    
    /* Add prefix if any */
    _x86_end_prefix(mode, instr, addr, block, prefix_start, end, tmp_var_count);
    
    // Update PC
    pc = x86_get_pc(mode);
    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));
    bblkid = end;
    return;
}

/* MOVSX: sign-extend op1 into op0. The lifter has no sign-extend primitive
 * here, so it branches on op1's MSB and fills the upper bits of a temporary
 * with either all-zeros or all-ones before assigning the result. */
inline void x86_movsx_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand pc, op0, op1, tmp0;
    IRBasicBlockId pos, neg, end;
    
    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);
    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);
    pc = (mode == CPUMode::X86)? 
ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);
    tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);

    // Update PC
    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));
    /*  Test MSB */
    pos = block->new_bblock();
    neg = block->new_bblock();
    end = block->new_bblock();
    // Branch on the sign bit of the source operand
    block->add_instr(bblkid, ir_bcc(x86_arg_extract(op1, op1.size-1, op1.size-1), ir_cst(neg, 31, 0), ir_cst(pos, 31, 0), addr));
    /* Positive (0 extend) */
    block->add_instr(pos, ir_mov(tmp0, ir_cst(0, tmp0.size-1, 0), addr));
    block->add_instr(pos, ir_mov(x86_arg_extract(tmp0, op1.size-1, 0), op1, addr));
    x86_adjust_reg_assign(mode, addr, block, pos, tmp_var_count, op0, tmp0);
    block->add_instr(pos, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));
    /* Negative (1 extend) */
    // All-ones above bit op1.size, then the low bits get the source value
    block->add_instr(neg, ir_mov(tmp0, ir_cst((ucst_t)0xffffffffffffffff<<op1.size, tmp0.size-1, 0), addr));
    block->add_instr(neg, ir_mov(x86_arg_extract(tmp0, op1.size-1, 0), op1, addr));
    x86_adjust_reg_assign(mode, addr, block, neg, tmp_var_count, op0, tmp0);
    block->add_instr(neg, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));
    
    bblkid = end;
    return;
}

/* MOVSXD: like MOVSX but, when source and destination already have the
 * same width, degenerates to a plain move. */
inline void x86_movsxd_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand pc, op0, op1, tmp0;
    IRBasicBlockId pos, neg, end;
    
    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);
    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);
    pc = (mode == CPUMode::X86)? 
ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);
    tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);

    // Update PC
    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));
    /* If already same size, just mov */
    if( op0.size == op1.size ){
        block->add_instr(bblkid, ir_mov(op0, op1, addr));
        return;
    }
    /*  Else extend : Test MSB */
    pos = block->new_bblock();
    neg = block->new_bblock();
    end = block->new_bblock();
    // Branch on the sign bit of the source operand
    block->add_instr(bblkid, ir_bcc(x86_arg_extract(op1, op1.size-1, op1.size-1), ir_cst(neg, 31, 0), ir_cst(pos, 31, 0), addr));
    /* Positive (0 extend) */
    block->add_instr(pos, ir_mov(tmp0, ir_cst(0, tmp0.size-1, 0), addr));
    block->add_instr(pos, ir_mov(x86_arg_extract(tmp0, op1.size-1, 0), op1, addr));
    x86_adjust_reg_assign(mode, addr, block, pos, tmp_var_count, op0, tmp0);
    block->add_instr(pos, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));
    /* Negative (1 extend) */
    block->add_instr(neg, ir_mov(tmp0, ir_cst((ucst_t)0xffffffffffffffff<<op1.size, tmp0.size-1, 0), addr));
    block->add_instr(neg, ir_mov(x86_arg_extract(tmp0, op1.size-1, 0), op1, addr));
    x86_adjust_reg_assign(mode, addr, block, neg, tmp_var_count, op0, tmp0);
    block->add_instr(neg, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));
    
    bblkid = end;
    return;
}

/* MOVZX: zero-extend op1 into op0 -- no branching needed, just clear a
 * temporary and copy the low bits. */
inline void x86_movzx_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand pc, op0, op1, tmp0;
    
    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);
    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);
    pc = (mode == CPUMode::X86)? 
ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);
    tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);

    /* Positive (0 extend) */
    block->add_instr(bblkid, ir_mov(tmp0, ir_cst(0, tmp0.size-1, 0), addr));
    block->add_instr(bblkid, ir_mov(x86_arg_extract(tmp0, op1.size-1, 0), op1, addr));
    x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, op0, tmp0);

    // Update PC
    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));
    
    return;
}

/* MUL: unsigned multiply of AL/AX/EAX/RAX by op0. The low half of the
 * double-width product goes to `lower`, the high half to `higher`
 * (AH for 8-bit operands, DX/EDX/RDX otherwise). CF and OF are set iff
 * the high half is non-zero. */
inline void x86_mul_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand pc, op0, lower, higher, tmp0, tmp1, ax, tmp2, tmp3, tmp4, cf, of;
    
    cf = (mode == CPUMode::X86)? ir_var(X86_CF, 31, 0): ir_var(X64_CF, 63, 0);
    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0): ir_var(X64_OF, 63, 0);
    
    /* Get operands */
    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);
    ax = (mode == CPUMode::X86)? ir_var(X86_EAX, op0.size-1, 0): ir_var(X64_RAX, op0.size-1, 0);
    if( op0.size == 8 ){
        // NOTE(review): in X64 mode `lower` spans RAX[15:0] while the X86
        // branch uses EAX[7:0] -- asymmetric; confirm this is intentional.
        lower = (mode == CPUMode::X86)? ir_var(X86_EAX, 7, 0): ir_var(X64_RAX, 15, 0);
        higher = (mode == CPUMode::X86)? ir_var(X86_EAX, 15, 8): ir_var(X64_RAX, 15, 8);
    }else{
        lower = (mode == CPUMode::X86)? ir_var(X86_EAX, op0.size-1, 0): ir_var(X64_RAX, op0.size-1, 0);
        higher = (mode == CPUMode::X86)? 
ir_var(X86_EDX, op0.size-1, 0): ir_var(X64_RDX, op0.size-1, 0);
    }
    tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);
    tmp1 = ir_tmp(tmp_var_count++, op0.size-1, 0);
    // NOTE(review): tmp2/tmp3 are allocated but never used below; they only
    // advance tmp_var_count. Left as-is since later temporary numbering
    // depends on the counter.
    tmp2 = ir_tmp(tmp_var_count++, 0, 0);
    tmp3 = ir_tmp(tmp_var_count++, 0, 0);
    
    /* Do the multiplication */
    // tmp0 = low half of the product, tmp1 = high half (mulh)
    block->add_instr(bblkid, ir_mul(tmp0, ax, op0, addr));
    block->add_instr(bblkid, ir_mulh(tmp1, ax, op0, addr));
    
    x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, lower, tmp0);
    x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, higher, tmp1);

    /* Set OF and CF to 1 if high order bits are not zero, else clear */
    block->add_instr(bblkid, ir_bisz(cf, tmp1, ir_cst(0, cf.size-1, 0), addr));
    block->add_instr(bblkid, ir_bisz(of, tmp1, ir_cst(0, of.size-1, 0), addr));
    
    // Update PC
    pc = x86_get_pc(mode);
    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));
    
    return;
}


/* NEG: two's-complement negate op0 in place. CF is set iff the operand was
 * non-zero; SF/ZF/PF/AF/OF are computed as for the subtraction 0 - op0. */
inline void x86_neg_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand pc, op0, cf, tmp0, dest;

    cf = (mode == CPUMode::X86)? 
ir_var(X86_CF, 31, 0): ir_var(X64_CF, 63, 0);

    /* Get operands */
    // dest = writable location, op0 = the current value (read translation)
    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);
    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);
    tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);

    /* CF = (op0 != 0) */
    block->add_instr(bblkid, ir_bisz(cf, op0, ir_cst(0, cf.size-1, 0), addr));
    /* Do the neg */
    block->add_instr(bblkid, ir_neg(tmp0, op0, addr));

    /* Set flags according to the result (same that for a sub from 0) */
    x86_set_sf(mode, tmp0, addr, block, bblkid);
    x86_set_zf(mode, tmp0, addr, block, bblkid);
    x86_set_pf(mode, tmp0, addr, block, bblkid, tmp_var_count);
    x86_sub_set_af(mode, ir_cst(0, op0.size-1, 0), op0, tmp0, addr, block, bblkid, tmp_var_count);
    x86_sub_set_of(mode, ir_cst(0, op0.size-1, 0), op0, tmp0, addr, block, bblkid, tmp_var_count);

    /*  Assign result */
    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){
        block->add_instr(bblkid, ir_stm(dest, tmp0, addr));
    }else{
        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, tmp0);
    }

    // Update PC
    pc = x86_get_pc(mode);
    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));

    return;
}

/* NOP: only advances the program counter. */
inline void x86_nop_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand pc;
    
    // Update PC
    pc = x86_get_pc(mode);
    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));
    
    return;
}

/* NOT: bitwise complement of op0 in place. No flags are affected. */
inline void x86_not_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand pc, op0, dest, tmp;
    
    /* Get operands */
    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);
  
  op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);
    tmp = ir_tmp(tmp_var_count++, op0.size-1, 0);

    /* Do the not */
    block->add_instr(bblkid, ir_not(tmp, op0, addr));
    
    /*  Assign result */
    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){
        block->add_instr(bblkid, ir_stm(dest, tmp, addr));
    }else{
        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, tmp);
    } 

    // Update PC
    pc = x86_get_pc(mode);
    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));
    
    return;
}

/* OR: bitwise or of op0 and op1 into op0. SF/ZF/PF follow the result,
 * OF and CF are cleared. */
inline void x86_or_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand op0, op1, dest, res, of, cf, pc;
    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);
    cf = (mode == CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);
    
    /* Get operands */
    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);
    // For a memory destination we need a separate read translation;
    // a register destination can be read directly
    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){
        op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);
    }else{
        op0 = dest;
    }
    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);
    /* Do the or */
    res = ir_tmp(tmp_var_count++, (instr->detail->x86.operands[0].size*8)-1, 0);
    block->add_instr(bblkid, ir_or(res, op0, op1, addr));
    
    /* Update flags: SF, ZF, PF */
    x86_set_zf(mode, res, addr, block, bblkid);
    x86_set_sf(mode, res, addr, block, bblkid);
    x86_set_pf(mode, res, addr, block, bblkid, tmp_var_count);
    /* OF and CF cleared */
    block->add_instr(bblkid, ir_mov(of, ir_cst(0, of.high, of.low), addr));
    block->add_instr(bblkid, ir_mov(cf, ir_cst(0, cf.high, 
cf.low), addr));
    
    /* Finally assign the result to the destination */ 
    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){
        block->add_instr(bblkid, ir_stm(dest, res, addr));
    }else{
        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, res);
    }

    // Update PC
    pc = x86_get_pc(mode);
    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));
    
    return;
}

/* POP: load the top-of-stack value, bump SP by the operand size, THEN
 * write the value to the destination (matching x86 semantics for e.g.
 * `pop [esp]`-style operands). */
inline void x86_pop_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand op0, sp, pc, tmp0;
    
    /* Get operands */
    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);
    sp = (mode == CPUMode::X86)? ir_var(X86_ESP, 31, 0): ir_var(X64_RSP, 63, 0); 
    
    /* Get the value on the stack */
    tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);
    block->add_instr(bblkid, ir_ldm(tmp0, sp, addr));
    
    /* Increment stack pointer */
    block->add_instr(bblkid, ir_add(sp, sp, ir_cst(instr->detail->x86.operands[0].size, sp.size-1, 0), addr));
    
    /* Assign the value that was on the stack (AFTER incrementing ESP) */
    // NOTE(review): the register case uses a plain ir_mov while sibling
    // lifters go through x86_adjust_reg_assign -- confirm this is intended.
    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){
        block->add_instr(bblkid, ir_stm(op0, tmp0, addr));
    }else{
        block->add_instr(bblkid, ir_mov(op0, tmp0, addr));
    }
    
    // Update PC
    pc = x86_get_pc(mode);
    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));
    
    return;
}

/* POPAD (32-bit only): pop EDI,ESI,EBP,(skip ESP),EBX,EDX,ECX,EAX. */
inline void x86_popad_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand esp, pc, edi, esi, ebp, ebx, edx, ecx, eax;
    if( mode == CPUMode::X64 ){
        throw runtime_exception("POPAD: invalid in X64 mode");
    }

    /* Get operands */
    esp = ir_var(X86_ESP, 31, 0);
    edi = ir_var(X86_EDI, 31, 0); 
    esi 
= ir_var(X86_ESI, 31, 0); 
    ebp = ir_var(X86_EBP, 31, 0); 
    ebx = ir_var(X86_EBX, 31, 0); 
    edx = ir_var(X86_EDX, 31, 0); 
    ecx = ir_var(X86_ECX, 31, 0); 
    eax = ir_var(X86_EAX, 31, 0);  
    
    /* Get the registers on the stack:
        EDI ← Pop();
        ESI ← Pop();
        EBP ← Pop();
        Increment ESP by 4; (* Skip next 4 bytes of stack *)
        EBX ← Pop();
        EDX ← Pop();
        ECX ← Pop();
        EAX ← Pop(); */
    
    block->add_instr(bblkid, ir_ldm(edi, esp, addr));
    block->add_instr(bblkid, ir_add(esp, esp, ir_cst(esp.size/8, esp.size-1, 0), addr));
    
    block->add_instr(bblkid, ir_ldm(esi, esp, addr));
    block->add_instr(bblkid, ir_add(esp, esp, ir_cst(esp.size/8, esp.size-1, 0), addr));
    
    block->add_instr(bblkid, ir_ldm(ebp, esp, addr));
    // esp.size/4 == 8 bytes here: 4 for the EBP pop itself plus 4 to skip
    // the saved ESP slot (see pseudocode above)
    block->add_instr(bblkid, ir_add(esp, esp, ir_cst(esp.size/4, esp.size-1, 0), addr));
    
    block->add_instr(bblkid, ir_ldm(ebx, esp, addr));
    block->add_instr(bblkid, ir_add(esp, esp, ir_cst(esp.size/8, esp.size-1, 0), addr));
    
    block->add_instr(bblkid, ir_ldm(edx, esp, addr));
    block->add_instr(bblkid, ir_add(esp, esp, ir_cst(esp.size/8, esp.size-1, 0), addr));
    
    block->add_instr(bblkid, ir_ldm(ecx, esp, addr));
    block->add_instr(bblkid, ir_add(esp, esp, ir_cst(esp.size/8, esp.size-1, 0), addr));
    
    block->add_instr(bblkid, ir_ldm(eax, esp, addr));
    block->add_instr(bblkid, ir_add(esp, esp, ir_cst(esp.size/8, esp.size-1, 0), addr));
    
    // Update PC
    pc = x86_get_pc(mode);
    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));
    
    return;
}

/* PUSH: decrement SP by the operand size, then store the operand at the
 * new top of stack. */
inline void x86_push_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand op0, sp, pc;
    
    /* Get operands */
    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);
    
sp = (mode == CPUMode::X86)? ir_var(X86_ESP, 31, 0): ir_var(X64_RSP, 63, 0); 
    
    /* Decrement stack pointer */
    block->add_instr(bblkid, ir_sub(sp, sp, ir_cst(instr->detail->x86.operands[0].size, sp.size-1, 0), addr));
    
    /* Get the value on the stack */
    block->add_instr(bblkid, ir_stm(sp, op0, addr));
    
    // Update PC
    pc = x86_get_pc(mode);
    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));
    
    return;
}

/* PUSHAD: push EAX,ECX,EDX,EBX,(original ESP),EBP,ESI,EDI, using a
 * temporary to capture ESP's value before any push. */
inline void x86_pushad_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand esp, pc, edi, esi, ebp, ebx, edx, ecx, eax, tmp0;
    
    /* Get operands */
    esp = (mode == CPUMode::X86)? ir_var(X86_ESP, 31, 0): ir_var(X64_RSP, 63, 0);
    edi = (mode == CPUMode::X86)? ir_var(X86_EDI, 31, 0): ir_var(X64_RDI, 63, 0); 
    esi = (mode == CPUMode::X86)? ir_var(X86_ESI, 31, 0): ir_var(X64_RSI, 63, 0); 
    ebp = (mode == CPUMode::X86)? ir_var(X86_EBP, 31, 0): ir_var(X64_RBP, 63, 0); 
    ebx = (mode == CPUMode::X86)? ir_var(X86_EBX, 31, 0): ir_var(X64_RBX, 63, 0); 
    edx = (mode == CPUMode::X86)? ir_var(X86_EDX, 31, 0): ir_var(X64_RDX, 63, 0); 
    ecx = (mode == CPUMode::X86)? ir_var(X86_ECX, 31, 0): ir_var(X64_RCX, 63, 0); 
    eax = (mode == CPUMode::X86)? 
ir_var(X86_EAX, 31, 0): ir_var(X64_RAX, 63, 0);  
    tmp0 = ir_tmp(tmp_var_count++, esp.size-1, 0);
    
    /* Get the registers on the stack:
        Temp ← (ESP);
        Push(EAX);
        Push(ECX);
        Push(EDX);
        Push(EBX);
        Push(Temp);
        Push(EBP);
        Push(ESI);
        Push(EDI); */
    
    // Capture the pre-push stack pointer value
    block->add_instr(bblkid, ir_mov(tmp0, esp, addr));
    
    
    block->add_instr(bblkid, ir_sub(esp, esp, ir_cst(esp.size/8, esp.size-1, 0), addr));
    block->add_instr(bblkid, ir_stm(esp, eax, addr));
    
    block->add_instr(bblkid, ir_sub(esp, esp, ir_cst(esp.size/8, esp.size-1, 0), addr));
    block->add_instr(bblkid, ir_stm(esp, ecx, addr));
    
    block->add_instr(bblkid, ir_sub(esp, esp, ir_cst(esp.size/8, esp.size-1, 0), addr));
    block->add_instr(bblkid, ir_stm(esp, edx, addr));
    
    block->add_instr(bblkid, ir_sub(esp, esp, ir_cst(esp.size/8, esp.size-1, 0), addr));
    block->add_instr(bblkid, ir_stm(esp, ebx, addr));
    
    block->add_instr(bblkid, ir_sub(esp, esp, ir_cst(esp.size/8, esp.size-1, 0), addr));
    block->add_instr(bblkid, ir_stm(esp, tmp0, addr));
    
    block->add_instr(bblkid, ir_sub(esp, esp, ir_cst(esp.size/8, esp.size-1, 0), addr));
    block->add_instr(bblkid, ir_stm(esp, ebp, addr));
    
    block->add_instr(bblkid, ir_sub(esp, esp, ir_cst(esp.size/8, esp.size-1, 0), addr));
    block->add_instr(bblkid, ir_stm(esp, esi, addr));
    
    block->add_instr(bblkid, ir_sub(esp, esp, ir_cst(esp.size/8, esp.size-1, 0), addr));
    block->add_instr(bblkid, ir_stm(esp, edi, addr));
    
    // Update PC
    pc = x86_get_pc(mode);
    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));
    
    return;
}

/* RCL: rotate-through-carry left by op1 (count masked to 5 bits, 6 for
 * 64-bit operands). result = (op0 << N) | (CF << (N-1)) | (op0 >> (size-N+1));
 * new CF = old bit size-N of op0; OF only defined when the masked count
 * is 1 (OF = MSB(result) XOR new CF). */
inline void x86_rcl_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand op0, op1, dest, pc, tmp0, cf, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, of, 
res;
    IRBasicBlockId set_of, cont, rotate;
    
    /* Get operands */
    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);
    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);
    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);
    cf = (mode == CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);
    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);

    /* Blocks */
    rotate = block->new_bblock();
    set_of = block->new_bblock();
    cont = block->new_bblock();

    // mask is 5 bits <= 32 bits operands, 6 bits for 64 bits operands
    unsigned int mask = (op0.size == 64)? 0b111111 : 0b11111;

    /* Mask the number of rotations N */
    tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);
    tmp1 = ir_tmp(tmp_var_count++, op0.size-1, 0);
    block->add_instr(bblkid, ir_and(tmp0, x86_arg_extract(op1, op0.size-1, 0), ir_cst(mask, op0.size-1, 0), addr));
    
    /* If masked count is zero, go to end, else do rotate */
    block->add_instr(bblkid, ir_bcc(tmp0, ir_cst(rotate, 31, 0), ir_cst(cont, 31, 0), addr));
    
    // Rotate 
    /* REG[up] = REG[size-1-N:N] = tmp1 */
    block->add_instr(rotate, ir_shl(tmp1, op0, tmp0, addr)); // Just shift left
    
    /* REG[N-1] = CF = tmp2 */
    tmp2 = ir_tmp(tmp_var_count++, op0.size-1, 0);
    tmp3 = ir_tmp(tmp_var_count++, op0.size-1, 0);
    block->add_instr(rotate, ir_sub(tmp3, tmp0, ir_cst(1, tmp0.size-1, 0), addr)); // tmp3 = N-1
    block->add_instr(rotate, ir_shl(tmp2, x86_arg_extract(cf, op0.size-1, 0), tmp3, addr)); // Just shift left of N-1
    
    /* REG[N-2:0] = REG[:size-N+1] = tmp5 */
    tmp4 = ir_tmp(tmp_var_count++, op0.size-1, 0);
    tmp5 = ir_tmp(tmp_var_count++, op0.size-1, 0);
    block->add_instr(rotate, ir_sub(tmp4, ir_cst(op0.size+1, op0.size-1, 0), tmp0, addr)); // tmp4 = size-N+1
    block->add_instr(rotate, ir_shr(tmp5, op0, tmp4, addr)); // Shift right of size-N+1

    /* Res is the OR of everything */
    res = ir_tmp(tmp_var_count++, op0.size-1, 0);
    block->add_instr(rotate, ir_or(res, tmp1, tmp2, addr));
    block->add_instr(rotate, ir_or(res, res, tmp5, addr));

    /* Assign res to dest and CF */
    /* CF = REG[size-N] (first CF because after we modify reg ! */
    // Shift left by N-1 so that bit (size-N) of op0 lands in the MSB
    tmp6 = ir_tmp(tmp_var_count++, op0.size-1, 0);
    block->add_instr(rotate, ir_shl(tmp6, op0, tmp3, addr)); // Just shift left of N-1
    block->add_instr(rotate, ir_mov(x86_arg_extract(cf, 0, 0), x86_arg_extract(tmp6, tmp6.size-1, tmp6.size-1), addr));
    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){
        block->add_instr(rotate, ir_stm(dest, res, addr));
    }else{
        x86_adjust_reg_assign(mode, addr, block, rotate, tmp_var_count, dest, res);
    }
    
    /* Affect OF flag iff masked count == 1 (cf.res in tmp3)*/
    tmp7 = ir_tmp(tmp_var_count++, tmp0.size-1, 0);
    block->add_instr(rotate, ir_xor(tmp7, tmp0, ir_cst(1, tmp0.size-1, 0), addr));
    block->add_instr(rotate, ir_bcc(tmp7, ir_cst(cont, 31, 0), ir_cst(set_of, 31, 0), addr));
    block->add_instr(set_of, ir_xor(x86_arg_extract(of, 0, 0), x86_arg_extract(res, res.size-1, res.size-1), x86_arg_extract(cf, 0, 0), addr));
    block->add_instr(set_of, ir_bcc(ir_cst(1, 31, 0) , ir_cst(cont, 31, 0), ir_none(), addr));

    // Update PC
    pc = x86_get_pc(mode);
    block->add_instr(cont, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));

    bblkid = cont;
    return;
}

/* RCR: rotate-through-carry right by op1 (count masked as for RCL).
 * result = (op0 >> N) | (CF << (size-N)) | (op0 << (size-N+1));
 * new CF = old bit N-1 of op0; OF only defined when the masked count is 1
 * (OF = XOR of the two most significant result bits). */
inline void x86_rcr_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand op0, op1, dest, pc, tmp0, cf, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, of, res;
    IRBasicBlockId set_of, cont, rotate;
    
    /* Get operands */
    op0 = x86_arg_translate(mode, addr, 
&(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);
    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);
    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);
    cf = (mode == CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);
    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);

    /* Blocks */
    rotate = block->new_bblock();
    set_of = block->new_bblock();
    cont = block->new_bblock();

    // mask is 5 bits <= 32 bits operands, 6 bits for 64 bits operands
    unsigned int mask = (op0.size == 64)? 0b111111 : 0b11111;

    /* Mask the number of rotations N */
    tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);
    tmp1 = ir_tmp(tmp_var_count++, op0.size-1, 0);
    block->add_instr(bblkid, ir_and(tmp0, x86_arg_extract(op1, op0.size-1, 0), ir_cst(mask, op0.size-1, 0), addr));
    
    /* If masked count is zero, go to end, else do rotate */
    block->add_instr(bblkid, ir_bcc(tmp0, ir_cst(rotate, 31, 0), ir_cst(cont, 31, 0), addr));
    
    // Rotate 
    /* REG[down] = REG[size-1:N] = tmp1 */
    block->add_instr(rotate, ir_shr(tmp1, op0, tmp0, addr)); // Just shift right
    
    /* REG[size-N] = CF = tmp2 */
    tmp2 = ir_tmp(tmp_var_count++, op0.size-1, 0);
    tmp3 = ir_tmp(tmp_var_count++, op0.size-1, 0);
    block->add_instr(rotate, ir_sub(tmp3, ir_cst(tmp0.size, tmp0.size-1, 0), tmp0, addr)); // tmp3 = size-N
    block->add_instr(rotate, ir_shl(tmp2, x86_arg_extract(cf, op0.size-1, 0), tmp3, addr)); // Just shift left of size-N
    
    /* Wrap-around low bits: op0 << (size-N+1) = tmp5 */
    tmp4 = ir_tmp(tmp_var_count++, op0.size-1, 0);
    tmp5 = ir_tmp(tmp_var_count++, op0.size-1, 0);
    block->add_instr(rotate, ir_add(tmp4, ir_cst(1, op0.size-1, 0), tmp3, addr)); // tmp4 = size-N+1
    block->add_instr(rotate, ir_shl(tmp5, op0, tmp4, addr)); // Shift left of size-N+1

    /* Res is the OR of everything */
    res = ir_tmp(tmp_var_count++, op0.size-1, 0);
    block->add_instr(rotate, ir_or(res, tmp1, tmp2, addr));
    block->add_instr(rotate, ir_or(res, res, tmp5, addr));

    /* Assign res to dest and CF */
    /* CF = old REG[N-1] (computed first because reg is modified below!) */
    tmp6 = ir_tmp(tmp_var_count++, op0.size-1, 0);
    // Shift left by size-N (tmp3): MSB of tmp6 is then bit N-1 of op0
    block->add_instr(rotate, ir_shl(tmp6, op0, tmp3, addr)); // Shift left of size-N
    block->add_instr(rotate, ir_mov(x86_arg_extract(cf, 0, 0), x86_arg_extract(tmp6, tmp6.size-1, tmp6.size-1), addr));
    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){
        block->add_instr(rotate, ir_stm(dest, res, addr));
    }else{
        x86_adjust_reg_assign(mode, addr, block, rotate, tmp_var_count, dest, res);
    }
    
    /* Affect OF flag iff masked count == 1 (cf.res in tmp3)*/
    tmp7 = ir_tmp(tmp_var_count++, tmp0.size-1, 0);
    block->add_instr(rotate, ir_xor(tmp7, tmp0, ir_cst(1, tmp0.size-1, 0), addr));
    block->add_instr(rotate, ir_bcc(tmp7, ir_cst(cont, 31, 0), ir_cst(set_of, 31, 0), addr));
    block->add_instr(set_of, ir_xor(x86_arg_extract(of, 0, 0), x86_arg_extract(res, res.size-1, res.size-1), x86_arg_extract(res, res.size-2, res.size-2), addr));
    block->add_instr(set_of, ir_bcc(ir_cst(1, 31, 0) , ir_cst(cont, 31, 0), ir_none(), addr));

    // Update PC
    pc = x86_get_pc(mode);
    block->add_instr(cont, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));

    bblkid = cont;
    return;
}

/* RDTSC: split the 64-bit time-stamp counter into EDX:EAX. */
inline void x86_rdtsc_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand   eax = (mode == CPUMode::X86)? ir_var(X86_EAX, 31, 0): ir_var(X64_RAX, 63, 0),
                edx = (mode == CPUMode::X86)? 
ir_var(X86_EDX, 31, 0): ir_var(X64_RDX, 63, 0),
                tsc = x86_get_tsc(mode), // TSC is always 64 bits
                pc = x86_get_pc(mode);
                    
    // Higher 32 bits in edx, lower 32 bits in eax
    x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, edx, x86_arg_extract(tsc, 63, 32));
    x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, eax, x86_arg_extract(tsc, 31, 0));

    // Update PC
    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));
}


/* RET: pop the return address from the stack, optionally release op0
 * extra bytes of stack (RET imm16 form), then jump to the popped
 * address. No fall-through PC update -- control transfers via ir_jcc. */
inline void x86_ret_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand op0, sp, pc, tmp0;
    
    /* Get operands */
    sp = (mode == CPUMode::X86)? ir_var(X86_ESP, 31, 0): ir_var(X64_RSP, 63, 0); 
    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0): ir_var(X64_RIP, 63, 0);
    
    /* Pop program counter */
    tmp0 = ir_tmp(tmp_var_count++, pc.size-1, 0);
    block->add_instr(bblkid, ir_ldm(tmp0, sp, addr));
    block->add_instr(bblkid, ir_add(sp, sp, ir_cst(pc.size/8, sp.size-1, 0), addr));
    
    /* If source operand adjust sp */
    if( instr->detail->x86.op_count != 0 ){
        op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);
        // Set sp adjustment size according to sp size and add it to sp
        block->add_instr(bblkid, ir_add(sp, sp, x86_arg_extract(op0, sp.size-1, 0), addr));
    }

    // Unconditional jump to the popped return address
    block->add_instr(bblkid, ir_jcc(ir_cst(1, pc.size-1, 0), tmp0, ir_none(), addr));
    
    return;
}

/* ROL: rotate left by op1 (count masked to 5 bits, 6 for 64-bit operands).
 * CF gets the bit rotated into position 0 when the count is non-zero;
 * OF is only defined when the masked count is 1. */
inline void x86_rol_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){
    IROperand op0, op1, dest, pc, tmp0, cf, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, of;
    IRBasicBlockId set_of, cont, set_cf, end, rotate;
    
    rotate = block->new_bblock();
    set_cf = block->new_bblock();
    cont = 
block->new_bblock();\n    set_of = block->new_bblock();\n    end = block->new_bblock();\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    cf = (mode == CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    \n    // mask is 5 bits for <= 32 bits operands, 6 bits for 64 bits operands\n    unsigned int mask = (op0.size == 64)? 0b111111 : 0b11111;\n    \n    /* Mask the number of rotations */\n    tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(bblkid, ir_and(tmp0, x86_arg_extract(op1, op0.size-1, 0), ir_cst(mask, op0.size-1, 0), addr)); \n\n    /* Check if count is 0 */\n    block->add_instr(bblkid, ir_bcc(tmp0, ir_cst(rotate, 31, 0), ir_cst(end, 31, 0), addr));\n\n    // Do rotate\n    /* Set rotations */\n    tmp4 = ir_tmp(tmp_var_count++, tmp0.size-1, 0);\n    block->add_instr(rotate, ir_sub(tmp4, ir_cst(op0.size, tmp0.size-1, 0), tmp0, addr));\n    \n    /* Rotate it (2 shifts) */\n    tmp2 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    tmp3 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(rotate, ir_shl(tmp2, op0, tmp0, addr));\n    block->add_instr(rotate, ir_shr(tmp3, op0, tmp4, addr));\n    block->add_instr(rotate, ir_or(tmp3, tmp3, tmp2, addr)); // res in tmp3\n    \n    /* Assign result to operand */\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(rotate, ir_stm(dest, tmp3, addr));\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, rotate, tmp_var_count, dest, tmp3);\n    }\n    \n    /* Affect CF flag iff masked count != 0 */\n    block->add_instr(rotate, 
ir_bcc(tmp0, ir_cst(set_cf, 31, 0), ir_cst(end, 31, 0), addr));\n    block->add_instr(set_cf, ir_mov(x86_arg_extract(cf, 0, 0), x86_arg_extract(tmp3, 0, 0), addr));\n    block->add_instr(set_cf, ir_bcc(ir_cst(1, 31, 0), ir_cst(cont, 31, 0), ir_none(), addr));\n    \n    /* Affect OF flag iff masked count == 1 (res in tmp3) */\n    tmp7 = ir_tmp(tmp_var_count++, tmp0.size-1, 0);\n    block->add_instr(cont, ir_xor(tmp7, tmp0, ir_cst(1, tmp0.size-1, 0), addr));\n    block->add_instr(cont, ir_bcc(tmp7, ir_cst(end, 31, 0), ir_cst(set_of, 31, 0), addr));\n    block->add_instr(set_of, ir_xor(x86_arg_extract(of, 0, 0), x86_arg_extract(tmp3, tmp3.size-1, tmp3.size-1), x86_arg_extract(cf, 0, 0), addr)); \n    block->add_instr(set_of, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n\n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n\n    bblkid = end;\n    return;\n}\n\ninline void x86_ror_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand op0, op1, dest, pc, tmp0, cf, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, of;\n    IRBasicBlockId set_of, cont, set_cf, end, rotate;\n    \n    rotate = block->new_bblock();\n    set_cf = block->new_bblock();\n    cont = block->new_bblock();\n    set_of = block->new_bblock();\n    end = block->new_bblock();\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    cf = (mode == CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    of = (mode == CPUMode::X86)? 
ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    \n    // mask is 5 bits <= 32 bits operands, 6 bits for 64 bits operands\n    unsigned int mask = (op0.size == 64)? 0b111111 : 0b11111;\n    \n    /* Mask the number of rotations */\n    tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    tmp1 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(bblkid, ir_and(tmp0, x86_arg_extract(op1, op0.size-1, 0), ir_cst(mask, op0.size-1, 0), addr));\n\n    /* Check if count is 0 */\n    block->add_instr(bblkid, ir_bcc(tmp0, ir_cst(rotate, 31, 0), ir_cst(end, 31, 0), addr));\n\n    // Do rotate\n    block->add_instr(rotate, ir_mov(tmp1, tmp0, addr)); // copy of tmp0\n    /* Set rotations */\n    tmp4 = ir_tmp(tmp_var_count++, tmp0.size-1, 0);\n    block->add_instr(rotate, ir_sub(tmp4, ir_cst(op0.size, tmp0.size-1, 0), tmp0, addr));\n    \n    /* Rotate it (2 shifts) */\n    tmp2 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    tmp3 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(rotate, ir_shr(tmp2, op0, tmp0, addr));\n    block->add_instr(rotate, ir_shl(tmp3, op0, tmp4, addr));\n    block->add_instr(rotate, ir_or(tmp3, tmp3, tmp2, addr)); // res in tmp3\n    \n    /* Assign result to operand */\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(rotate, ir_stm(dest, tmp3, addr));\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, rotate, tmp_var_count, dest, tmp3);\n    }\n\n    /* Affect CF flag iff masked count != 0 */\n    block->add_instr(rotate, ir_bcc(tmp0, ir_cst(set_cf, 31, 0), ir_cst(end, 31, 0), addr));\n    block->add_instr(set_cf, ir_mov(x86_arg_extract(cf, 0, 0), x86_arg_extract(tmp3, tmp3.size-1, tmp3.size-1), addr));\n    block->add_instr(set_cf, ir_bcc(ir_cst(1, 31, 0), ir_cst(cont, 31, 0), ir_none(), addr));\n\n    /* Affect OF flag iff masked count == 1 (res in tmp3) */\n    tmp5 = ir_tmp(tmp_var_count++, tmp0.size-1, 0);\n    block->add_instr(cont, ir_xor(tmp5, tmp1, ir_cst(1, 
tmp0.size-1, 0), addr));\n    block->add_instr(cont, ir_bcc(tmp5, ir_cst(end, 31, 0), ir_cst(set_of, 31, 0), addr));\n    block->add_instr(set_of, ir_xor(x86_arg_extract(of, 0, 0), x86_arg_extract(tmp3, tmp3.size-2, tmp3.size-2), x86_arg_extract(cf, 0, 0), addr)); \n    block->add_instr(set_of, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n\n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n\n    bblkid = end;\n    return;\n}\n\ninline void x86_sal_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand op0, op1, dest, pc, tmp0, cf, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, of;\n    IRBasicBlockId set_of, cont, shift;\n\n    shift = block->new_bblock();\n    set_of = block->new_bblock();\n    cont = block->new_bblock();\n\n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    cf = (mode == CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    \n    // mask is 5 bits for operands <= 32bits, 6 bits for 64 bits operands\n    unsigned int mask = (op0.size == 64)? 
0b111111 : 0b11111;\n\n    /* Mask the number of rotations */\n    tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(bblkid, ir_and(tmp0, x86_arg_extract(op1, op0.size-1, 0), ir_cst(mask, op0.size-1, 0), addr));\n\n    /* Check if count is 0 */\n    block->add_instr(bblkid, ir_bcc(tmp0, ir_cst(shift, 31, 0), ir_cst(cont, 31, 0), addr));\n\n    // Do shift\n    /* Affect CF (last bit shifted out) */\n    tmp1 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    tmp4 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(shift, ir_sub(tmp1, ir_cst(op0.size, tmp0.size-1, 0), tmp0, addr)); // Num of the last bit that'll be shifted out\n    //block->add_instr(shift, ir_neg(tmp1, tmp1, addr)); // Shift right to get the bit\n    block->add_instr(shift, ir_shr(tmp4, op0, tmp1, addr));\n    block->add_instr(shift, ir_mov(x86_arg_extract(cf, 0, 0), x86_arg_extract(tmp4, 0, 0), addr));\n    \n    /* Do the shift */\n    tmp2 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(shift, ir_shl(tmp2, op0, tmp0, addr));\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(shift, ir_stm(dest, tmp2, addr));\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, shift, tmp_var_count, dest, tmp2);\n    }\n    \n    /* Affect OF flag iff masked count == 1 */\n    tmp3 = ir_tmp(tmp_var_count++, tmp0.size-1, 0);\n    block->add_instr(shift, ir_xor(tmp3, tmp0, ir_cst(1, tmp0.size-1, 0), addr));\n    block->add_instr(shift, ir_bcc(tmp3, ir_cst(cont, 31, 0), ir_cst(set_of, 31, 0), addr));\n    block->add_instr(set_of, ir_xor(x86_arg_extract(of, 0, 0), x86_arg_extract(tmp2, tmp2.size-1, tmp2.size-1), x86_arg_extract(cf, 0, 0), addr));\n    block->add_instr(set_of, ir_bcc(ir_cst(1, 31, 0) , ir_cst(cont, 31, 0), ir_none(), addr));\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(cont, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    bblkid = cont;\n    return;\n}\n\ninline 
void x86_sar_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand op0, op1, dest, pc, tmp0, cf, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, of;\n    IRBasicBlockId set_of, cont, pos, neg, shift;\n    \n    shift = block->new_bblock();\n    pos = block->new_bblock();\n    neg = block->new_bblock();\n    set_of = block->new_bblock();\n    cont = block->new_bblock();\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    cf = (mode == CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    \n    // mask is 5 bits for operands <= 32bits, 6 bits for 64 bits operands\n    unsigned int mask = (op0.size == 64)? 
0b111111 : 0b11111;\n    \n    /* Mask the number of rotations */\n    tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    tmp3 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(bblkid, ir_and(tmp0, x86_arg_extract(op1, op0.size-1, 0), ir_cst(mask, op0.size-1, 0), addr));\n    block->add_instr(bblkid, ir_mov(tmp3, tmp0, addr)); // save in tmp3\n    \n    /* Check if count is 0 */\n    block->add_instr(bblkid, ir_bcc(tmp0, ir_cst(shift, 31, 0), ir_cst(cont, 31, 0), addr));\n\n    // Do shift\n    /* Affect CF (last bit shifted out) */\n    tmp1 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    tmp4 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(shift, ir_sub(tmp1, tmp0, ir_cst(1, tmp0.size-1, 0), addr)); // Num of the last bit that'll be shifted out\n    //block->add_instr(shift, ir_neg(tmp1, tmp1, addr)); // Shift right to get the bit\n    block->add_instr(shift, ir_shr(tmp4, op0, tmp1, addr));\n    block->add_instr(shift, ir_mov(x86_arg_extract(cf, 0, 0), x86_arg_extract(tmp4, 0, 0), addr));\n    \n    /* Get mask for sign propagation when shifting */\n    tmp5 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    tmp6 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(shift, ir_bcc(x86_arg_extract(op0, op0.size-1, op0.size-1), ir_cst(neg, 31, 0), ir_cst(pos, 31, 0), addr));\n    shift = block->new_bblock();\n    block->add_instr(pos, ir_mov(tmp5, ir_cst(0, tmp5.size-1, 0), addr));\n    block->add_instr(pos, ir_bcc(ir_cst(1, 31, 0), ir_cst(shift, 31, 0), ir_none(), addr));\n    block->add_instr(neg, ir_mov(tmp5, ir_cst(-1, tmp5.size-1, 0), addr));\n    block->add_instr(neg, ir_sub(tmp6, ir_cst(op0.size, tmp0.size-1, 0), tmp0, addr));\n    block->add_instr(neg, ir_shl(tmp5, tmp5, tmp6, addr));\n    block->add_instr(neg, ir_bcc(ir_cst(1, 31, 0), ir_cst(shift, 31, 0), ir_none(), addr));\n    \n    /* Do the shift */\n    tmp2 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(shift, ir_shr(tmp2, op0, tmp0, addr));\n    
block->add_instr(shift, ir_or(tmp2, tmp2, tmp5, addr));\n\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(shift, ir_stm(dest, tmp2, addr));\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, shift, tmp_var_count, dest, tmp2);\n    }\n    \n    /* Affect OF flag iff masked count == 1 */\n    block->add_instr(shift, ir_xor(tmp3, tmp3, ir_cst(1, tmp0.size-1, 0), addr));\n    block->add_instr(shift, ir_bcc(tmp3, ir_cst(cont, 31, 0), ir_cst(set_of, 31, 0), addr));\n    block->add_instr(set_of, ir_mov(of, ir_cst(0, of.size-1, 0), addr));\n    block->add_instr(set_of, ir_bcc(ir_cst(1, 31, 0) , ir_cst(cont, 31, 0), ir_none(), addr));\n    \n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(cont, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    bblkid = cont;\n    return;\n}\n\ninline void x86_scasb_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, al, di, df, tmp0, tmp1;\n    IRBasicBlockId inc, dec, end, prefix_start;\n    \n    df = (mode == CPUMode::X86)? ir_var(X86_DF, 31, 0) : ir_var(X64_DF, 63, 0);\n    al = (mode == CPUMode::X86)? ir_var(X86_EAX, 7, 0): ir_var(X64_RAX, 7, 0);\n    di = (mode == CPUMode::X86)? ir_var(X86_EDI, 31, 0): ir_var(X64_RDI, 63, 0);\n    pc = (mode == CPUMode::X86)? 
ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n\n    prefix_start = _x86_init_prefix(mode, instr, addr, block, bblkid);\n    \n    /*  Load byte */\n    tmp0 = ir_tmp(tmp_var_count++, al.size-1, 0);\n    tmp1 = ir_tmp(tmp_var_count++, al.size-1, 0);\n    block->add_instr(bblkid, ir_ldm(tmp0, di, addr));\n    block->add_instr(bblkid, ir_sub(tmp1, al, tmp0, addr));\n    \n    /* Set flags */\n    x86_set_pf( mode, tmp1, addr, block, bblkid, tmp_var_count );\n    x86_set_zf( mode, tmp1, addr, block, bblkid );\n    x86_set_sf( mode, tmp1, addr, block, bblkid );\n    x86_sub_set_of( mode, al, tmp0, tmp1, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_cf( mode, al, tmp0, tmp1, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_af( mode, al, tmp0, tmp1, addr, block, bblkid, tmp_var_count );\n    \n    /* Adjust DI */\n    inc = block->new_bblock();\n    dec = block->new_bblock();\n    end = block->new_bblock();\n    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, df.size-1, 0), ir_cst(inc, df.size-1, 0), addr));\n    /* Increment */ \n    block->add_instr(inc, ir_add(di, di, ir_cst(1, di.size-1, 0), addr));\n    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    /* Or decrement */\n    block->add_instr(dec, ir_sub(di, di, ir_cst(1, di.size-1, 0), addr));\n    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    /* Add prefix if any */\n    _x86_end_prefix(mode, instr, addr, block, prefix_start, end, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    bblkid = end;\n    return;\n}\n\ninline void x86_scasd_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, eax, di, df, tmp0, tmp1;\n    IRBasicBlockId inc, dec, end, prefix_start;\n\n    df = (mode == CPUMode::X86)? 
ir_var(X86_DF, 31, 0) : ir_var(X64_DF, 63, 0);\n    eax = (mode == CPUMode::X86)? ir_var(X86_EAX, 31, 0): ir_var(X64_RAX, 31, 0);\n    di = (mode == CPUMode::X86)? ir_var(X86_EDI, 31, 0): ir_var(X64_RDI, 31, 0);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n\n    prefix_start = _x86_init_prefix(mode, instr, addr, block, bblkid);\n    \n    /*  Load byte */\n    tmp0 = ir_tmp(tmp_var_count++, eax.size-1, 0);\n    tmp1 = ir_tmp(tmp_var_count++, eax.size-1, 0);\n    block->add_instr(bblkid, ir_ldm(tmp0, di, addr));\n    block->add_instr(bblkid, ir_sub(tmp1, eax, tmp0, addr));\n    \n    /* Set flags */\n    x86_set_pf( mode, tmp1, addr, block, bblkid, tmp_var_count );\n    x86_set_zf( mode, tmp1, addr, block, bblkid );\n    x86_set_sf( mode, tmp1, addr, block, bblkid );\n    x86_sub_set_of( mode, eax, tmp0, tmp1, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_cf( mode, eax, tmp0, tmp1, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_af( mode, eax, tmp0, tmp1, addr, block, bblkid, tmp_var_count );\n    \n    /* Adjust DI */\n    inc = block->new_bblock();\n    dec = block->new_bblock();\n    end = block->new_bblock();\n    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, df.size-1, 0), ir_cst(inc, df.size-1, 0), addr));\n    /* Increment */ \n    block->add_instr(inc, ir_add(di, di, ir_cst(4, di.size-1, 0), addr));\n    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    /* Or decrement */\n    block->add_instr(dec, ir_sub(di, di, ir_cst(4, di.size-1, 0), addr));\n    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    /* Add prefix if any */\n    _x86_end_prefix(mode, instr, addr, block, prefix_start, end, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    bblkid = end;\n    return;\n}\n\ninline void x86_scasq_d(CPUMode mode, 
cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, rax, di, df, tmp0, tmp1;\n    IRBasicBlockId inc, dec, end, prefix_start;\n    \n    df = ir_var(X64_DF, 63, 0);\n    rax = ir_var(X64_RAX, 63, 0);\n    di = ir_var(X64_RDI, 63, 0);\n    pc = ir_var(X64_RIP, 63, 0);\n\n    prefix_start = _x86_init_prefix(mode, instr, addr, block, bblkid);\n    \n    /*  Load byte */\n    tmp0 = ir_tmp(tmp_var_count++, rax.size-1, 0);\n    tmp1 = ir_tmp(tmp_var_count++, rax.size-1, 0);\n    block->add_instr(bblkid, ir_ldm(tmp0, di, addr));\n    block->add_instr(bblkid, ir_sub(tmp1, rax, tmp0, addr));\n    \n    /* Set flags */\n    x86_set_pf( mode, tmp1, addr, block, bblkid, tmp_var_count );\n    x86_set_zf( mode, tmp1, addr, block, bblkid );\n    x86_set_sf( mode, tmp1, addr, block, bblkid );\n    x86_sub_set_of( mode, rax, tmp0, tmp1, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_cf( mode, rax, tmp0, tmp1, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_af( mode, rax, tmp0, tmp1, addr, block, bblkid, tmp_var_count );\n    \n    /* Adjust DI */\n    inc = block->new_bblock();\n    dec = block->new_bblock();\n    end = block->new_bblock();\n    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, df.size-1, 0), ir_cst(inc, df.size-1, 0), addr));\n    /* Increment */ \n    block->add_instr(inc, ir_add(di, di, ir_cst(8, di.size-1, 0), addr));\n    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    /* Or decrement */\n    block->add_instr(dec, ir_sub(di, di, ir_cst(8, di.size-1, 0), addr));\n    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    /* Add prefix if any */\n    _x86_end_prefix(mode, instr, addr, block, prefix_start, end, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    bblkid = end;\n    return;\n}\n\ninline 
void x86_scasw_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, ax, di, df, tmp0, tmp1;\n    IRBasicBlockId inc, dec, end, prefix_start;\n    \n    df = (mode == CPUMode::X86)? ir_var(X86_DF, 31, 0) : ir_var(X64_DF, 63, 0);\n    ax = (mode == CPUMode::X86)? ir_var(X86_EAX, 15, 0): ir_var(X64_RAX, 15, 0);\n    di = (mode == CPUMode::X86)? ir_var(X86_EDI, 31, 0): ir_var(X64_RDI, 63, 0);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n\n    prefix_start = _x86_init_prefix(mode, instr, addr, block, bblkid);\n    \n    /*  Load byte */\n    tmp0 = ir_tmp(tmp_var_count++, ax.size-1, 0);\n    tmp1 = ir_tmp(tmp_var_count++, ax.size-1, 0);\n    block->add_instr(bblkid, ir_ldm(tmp0, di, addr));\n    block->add_instr(bblkid, ir_sub(tmp1, ax, tmp0, addr));\n    \n    /* Set flags */\n    x86_set_pf( mode, tmp1, addr, block, bblkid, tmp_var_count );\n    x86_set_zf( mode, tmp1, addr, block, bblkid );\n    x86_set_sf( mode, tmp1, addr, block, bblkid );\n    x86_sub_set_of( mode, ax, tmp0, tmp1, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_cf( mode, ax, tmp0, tmp1, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_af( mode, ax, tmp0, tmp1, addr, block, bblkid, tmp_var_count );\n    \n    /* Adjust DI */\n    inc = block->new_bblock();\n    dec = block->new_bblock();\n    end = block->new_bblock();\n    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, df.size-1, 0), ir_cst(inc, df.size-1, 0), addr));\n    /* Increment */ \n    block->add_instr(inc, ir_add(di, di, ir_cst(2, di.size-1, 0), addr));\n    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    /* Or decrement */\n    block->add_instr(dec, ir_sub(di, di, ir_cst(2, di.size-1, 0), addr));\n    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    /* Add prefix if any */\n    _x86_end_prefix(mode, instr, addr, block, 
prefix_start, end, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    bblkid = end;\n    return;\n}\n\ninline void x86_seta_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand cf, zf, pc, tmp0, tmp1, op0;\n    IRBasicBlockId  set = block->new_bblock(),\n                    dont_set = block->new_bblock(),\n                    end = block->new_bblock();\n    cf = (mode==CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    zf = (mode==CPUMode::X86)? ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    tmp0 = ir_tmp(tmp_var_count++, 0, 0);\n    tmp1 = ir_tmp(tmp_var_count++, 0, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if CF = 0 and ZF = 0 \n    block->add_instr(bblkid, ir_not(tmp0, cf, addr));\n    block->add_instr(bblkid, ir_not(tmp1, zf, addr));\n    block->add_instr(bblkid, ir_and(tmp1, tmp1, tmp0, addr));\n    block->add_instr(bblkid, ir_bcc(tmp1, ir_cst(set,31, 0), ir_cst(dont_set, 31, 0), addr));\n    // do set\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(set, ir_stm(op0, ir_cst(1, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(set, ir_mov(op0, ir_cst(1, op0.size-1, 0), addr));\n    }\n    block->add_instr(set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont set - put zero \n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(dont_set, ir_stm(op0, ir_cst(0, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(dont_set, ir_mov(op0, ir_cst(0, op0.size-1, 0), addr));\n    }\n    
block->add_instr(dont_set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_setae_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand cf, pc, tmp0, tmp1, op0;\n    IRBasicBlockId  set = block->new_bblock(),\n                    dont_set = block->new_bblock(),\n                    end = block->new_bblock();\n    cf = (mode==CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if CF = 0 \n    block->add_instr(bblkid, ir_bcc(cf, ir_cst(dont_set,31, 0), ir_cst(set, 31, 0), addr));\n    // do set\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(set, ir_stm(op0, ir_cst(1, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(set, ir_mov(op0, ir_cst(1, op0.size-1, 0), addr));\n    }\n    block->add_instr(set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont set - put zero \n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(dont_set, ir_stm(op0, ir_cst(0, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(dont_set, ir_mov(op0, ir_cst(0, op0.size-1, 0), addr));\n    }\n    block->add_instr(dont_set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_setb_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand cf, pc, tmp0, tmp1, op0;\n    IRBasicBlockId  set = block->new_bblock(),\n                    dont_set = block->new_bblock(),\n                    end = 
block->new_bblock();\n    cf = (mode==CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if CF = 1\n    block->add_instr(bblkid, ir_bcc(cf, ir_cst(set,31, 0), ir_cst(dont_set, 31, 0), addr));\n    // do set\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(set, ir_stm(op0, ir_cst(1, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(set, ir_mov(op0, ir_cst(1, op0.size-1, 0), addr));\n    }\n    block->add_instr(set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont set - put zero \n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(dont_set, ir_stm(op0, ir_cst(0, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(dont_set, ir_mov(op0, ir_cst(0, op0.size-1, 0), addr));\n    }\n    block->add_instr(dont_set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_setbe_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand cf, zf, pc, tmp0, op0;\n    IRBasicBlockId  set = block->new_bblock(),\n                    dont_set = block->new_bblock(),\n                    end = block->new_bblock();\n    cf = (mode==CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    zf = (mode==CPUMode::X86)? 
ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    tmp0 = ir_tmp(tmp_var_count++, cf.size-1, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if CF = 1 or ZF = 1 \n    block->add_instr(bblkid, ir_or(tmp0, cf, zf, addr));\n    block->add_instr(bblkid, ir_bcc(tmp0, ir_cst(set,31, 0), ir_cst(dont_set, 31, 0), addr));\n    \n    // do set\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(set, ir_stm(op0, ir_cst(1, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(set, ir_mov(op0, ir_cst(1, op0.size-1, 0), addr));\n    }\n    block->add_instr(set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont set - put zero \n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(dont_set, ir_stm(op0, ir_cst(0, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(dont_set, ir_mov(op0, ir_cst(0, op0.size-1, 0), addr));\n    }\n    block->add_instr(dont_set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_sete_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand zf, pc, tmp0, tmp1, op0;\n    IRBasicBlockId  set = block->new_bblock(),\n                    dont_set = block->new_bblock(),\n                    end = block->new_bblock();\n    zf = (mode==CPUMode::X86)? 
ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if ZF = 1\n    block->add_instr(bblkid, ir_bcc(zf, ir_cst(set,31, 0), ir_cst(dont_set, 31, 0), addr));\n    // do set\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(set, ir_stm(op0, ir_cst(1, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(set, ir_mov(op0, ir_cst(1, op0.size-1, 0), addr));\n    }\n    block->add_instr(set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont set - put zero \n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(dont_set, ir_stm(op0, ir_cst(0, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(dont_set, ir_mov(op0, ir_cst(0, op0.size-1, 0), addr));\n    }\n    block->add_instr(dont_set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_setg_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand sf, zf, of, pc, tmp0, tmp1, op0;\n    IRBasicBlockId  set = block->new_bblock(),\n                    dont_set = block->new_bblock(),\n                    end = block->new_bblock();\n    sf = (mode==CPUMode::X86)? ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n    of = (mode==CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    zf = (mode==CPUMode::X86)? 
ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    tmp0 = ir_tmp(tmp_var_count++, 0, 0);\n    tmp1 = ir_tmp(tmp_var_count++, 0, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if ZF = 0 and  SF=OF \n    block->add_instr(bblkid, ir_xor(tmp0, x86_arg_extract(sf, 0, 0), x86_arg_extract(of, 0, 0), addr));\n    block->add_instr(bblkid, ir_not(tmp0, tmp0, addr));\n    block->add_instr(bblkid, ir_not(tmp1, x86_arg_extract(zf, 0, 0), addr));\n    block->add_instr(bblkid, ir_and(tmp1, tmp1, tmp0, addr));\n    block->add_instr(bblkid, ir_bcc(tmp1, ir_cst(set,31, 0), ir_cst(dont_set, 31, 0), addr));\n    \n    // do set\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(set, ir_stm(op0, ir_cst(1, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(set, ir_mov(op0, ir_cst(1, op0.size-1, 0), addr));\n    }\n    block->add_instr(set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont set - put zero \n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(dont_set, ir_stm(op0, ir_cst(0, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(dont_set, ir_mov(op0, ir_cst(0, op0.size-1, 0), addr));\n    }\n    block->add_instr(dont_set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_setge_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand sf, of, pc, tmp0, tmp1, op0;\n    IRBasicBlockId  set = block->new_bblock(),\n                    dont_set = block->new_bblock(),\n                    end = block->new_bblock();\n    sf = (mode==CPUMode::X86)? 
ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n    of = (mode==CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    tmp0 = ir_tmp(tmp_var_count++, 0, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if SF=OF \n    block->add_instr(bblkid, ir_xor(tmp0, x86_arg_extract(sf, 0, 0), x86_arg_extract(of, 0, 0), addr));\n    block->add_instr(bblkid, ir_bcc(tmp0, ir_cst(dont_set,31, 0), ir_cst(set, 31, 0), addr));\n    \n    // do set\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(set, ir_stm(op0, ir_cst(1, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(set, ir_mov(op0, ir_cst(1, op0.size-1, 0), addr));\n    }\n    block->add_instr(set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont set - put zero \n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(dont_set, ir_stm(op0, ir_cst(0, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(dont_set, ir_mov(op0, ir_cst(0, op0.size-1, 0), addr));\n    }\n    block->add_instr(dont_set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_setl_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand sf, of, pc, tmp0, tmp1, op0;\n    IRBasicBlockId  set = block->new_bblock(),\n                    dont_set = block->new_bblock(),\n                    end = block->new_bblock();\n    sf = (mode==CPUMode::X86)? ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n    of = (mode==CPUMode::X86)? 
ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    tmp0 = ir_tmp(tmp_var_count++, 0, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if SF != OF \n    block->add_instr(bblkid, ir_xor(tmp0, x86_arg_extract(sf, 0, 0), x86_arg_extract(of, 0, 0), addr));\n    block->add_instr(bblkid, ir_bcc(tmp0, ir_cst(set,31, 0), ir_cst(dont_set, 31, 0), addr));\n    \n    // do set\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(set, ir_stm(op0, ir_cst(1, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(set, ir_mov(op0, ir_cst(1, op0.size-1, 0), addr));\n    }\n    block->add_instr(set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont set - put zero \n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(dont_set, ir_stm(op0, ir_cst(0, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(dont_set, ir_mov(op0, ir_cst(0, op0.size-1, 0), addr));\n    }\n    block->add_instr(dont_set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_setle_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand sf, zf, of, pc, tmp0, tmp1, op0;\n    IRBasicBlockId  set = block->new_bblock(),\n                    dont_set = block->new_bblock(),\n                    end = block->new_bblock();\n    sf = (mode==CPUMode::X86)? ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n    of = (mode==CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    zf = (mode==CPUMode::X86)? 
ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    tmp0 = ir_tmp(tmp_var_count++, 0, 0);\n    tmp1 = ir_tmp(tmp_var_count++, 0, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if ZF = 1 or  SF != OF \n    block->add_instr(bblkid, ir_xor(tmp0, x86_arg_extract(sf, 0, 0), x86_arg_extract(of, 0, 0), addr));\n    block->add_instr(bblkid, ir_or(tmp0, x86_arg_extract(zf, 0, 0), tmp0, addr));\n    block->add_instr(bblkid, ir_bcc(tmp0, ir_cst(set,31, 0), ir_cst(dont_set, 31, 0), addr));\n    \n    // do set\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(set, ir_stm(op0, ir_cst(1, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(set, ir_mov(op0, ir_cst(1, op0.size-1, 0), addr));\n    }\n    block->add_instr(set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont set - put zero \n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(dont_set, ir_stm(op0, ir_cst(0, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(dont_set, ir_mov(op0, ir_cst(0, op0.size-1, 0), addr));\n    }\n    block->add_instr(dont_set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_setne_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand zf, pc, tmp0, tmp1, op0;\n    IRBasicBlockId  set = block->new_bblock(),\n                    dont_set = block->new_bblock(),\n                    end = block->new_bblock();\n    zf = (mode==CPUMode::X86)? 
ir_var(X86_ZF, 31, 0) : ir_var(X64_ZF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if ZF = 0\n    block->add_instr(bblkid, ir_bcc(zf, ir_cst(dont_set,31, 0), ir_cst(set, 31, 0), addr));\n    // do set\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(set, ir_stm(op0, ir_cst(1, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(set, ir_mov(op0, ir_cst(1, op0.size-1, 0), addr));\n    }\n    block->add_instr(set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont set - put zero \n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(dont_set, ir_stm(op0, ir_cst(0, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(dont_set, ir_mov(op0, ir_cst(0, op0.size-1, 0), addr));\n    }\n    block->add_instr(dont_set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_setno_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand of, pc, tmp0, tmp1, op0;\n    IRBasicBlockId  set = block->new_bblock(),\n                    dont_set = block->new_bblock(),\n                    end = block->new_bblock();\n    of = (mode==CPUMode::X86)? 
ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if OF = 0\n    block->add_instr(bblkid, ir_bcc(of, ir_cst(dont_set,31, 0), ir_cst(set, 31, 0), addr));\n    // do set\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(set, ir_stm(op0, ir_cst(1, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(set, ir_mov(op0, ir_cst(1, op0.size-1, 0), addr));\n    }\n    block->add_instr(set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont set - put zero \n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(dont_set, ir_stm(op0, ir_cst(0, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(dont_set, ir_mov(op0, ir_cst(0, op0.size-1, 0), addr));\n    }\n    block->add_instr(dont_set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_setnp_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pf, pc, tmp0, tmp1, op0;\n    IRBasicBlockId  set = block->new_bblock(),\n                    dont_set = block->new_bblock(),\n                    end = block->new_bblock();\n    pf = (mode==CPUMode::X86)? 
ir_var(X86_PF, 31, 0) : ir_var(X64_PF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if PF = 0\n    block->add_instr(bblkid, ir_bcc(pf, ir_cst(dont_set,31, 0), ir_cst(set, 31, 0), addr));\n    // do set\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(set, ir_stm(op0, ir_cst(1, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(set, ir_mov(op0, ir_cst(1, op0.size-1, 0), addr));\n    }\n    block->add_instr(set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont set - put zero \n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(dont_set, ir_stm(op0, ir_cst(0, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(dont_set, ir_mov(op0, ir_cst(0, op0.size-1, 0), addr));\n    }\n    block->add_instr(dont_set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_setns_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand sf, pc, tmp0, tmp1, op0;\n    IRBasicBlockId  set = block->new_bblock(),\n                    dont_set = block->new_bblock(),\n                    end = block->new_bblock();\n    sf = (mode==CPUMode::X86)? 
ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if SF = 0\n    block->add_instr(bblkid, ir_bcc(sf, ir_cst(dont_set,31, 0), ir_cst(set, 31, 0), addr));\n    // do set\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(set, ir_stm(op0, ir_cst(1, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(set, ir_mov(op0, ir_cst(1, op0.size-1, 0), addr));\n    }\n    block->add_instr(set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont set - put zero \n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(dont_set, ir_stm(op0, ir_cst(0, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(dont_set, ir_mov(op0, ir_cst(0, op0.size-1, 0), addr));\n    }\n    block->add_instr(dont_set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_seto_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand of, pc, tmp0, tmp1, op0;\n    IRBasicBlockId  set = block->new_bblock(),\n                    dont_set = block->new_bblock(),\n                    end = block->new_bblock();\n    of = (mode==CPUMode::X86)? 
ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if OF = 1\n    block->add_instr(bblkid, ir_bcc(of, ir_cst(set,31, 0), ir_cst(dont_set, 31, 0), addr));\n    // do set\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(set, ir_stm(op0, ir_cst(1, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(set, ir_mov(op0, ir_cst(1, op0.size-1, 0), addr));\n    }\n    block->add_instr(set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont set - put zero \n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(dont_set, ir_stm(op0, ir_cst(0, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(dont_set, ir_mov(op0, ir_cst(0, op0.size-1, 0), addr));\n    }\n    block->add_instr(dont_set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_setp_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pf, pc, tmp0, tmp1, op0;\n    IRBasicBlockId  set = block->new_bblock(),\n                    dont_set = block->new_bblock(),\n                    end = block->new_bblock();\n    pf = (mode==CPUMode::X86)? 
ir_var(X86_PF, 31, 0) : ir_var(X64_PF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if PF = 1\n    block->add_instr(bblkid, ir_bcc(pf, ir_cst(set,31, 0), ir_cst(dont_set, 31, 0), addr));\n    // do set\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(set, ir_stm(op0, ir_cst(1, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(set, ir_mov(op0, ir_cst(1, op0.size-1, 0), addr));\n    }\n    block->add_instr(set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont set - put zero \n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(dont_set, ir_stm(op0, ir_cst(0, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(dont_set, ir_mov(op0, ir_cst(0, op0.size-1, 0), addr));\n    }\n    block->add_instr(dont_set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_sets_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand sf, pc, tmp0, tmp1, op0;\n    IRBasicBlockId  set = block->new_bblock(),\n                    dont_set = block->new_bblock(),\n                    end = block->new_bblock();\n    sf = (mode==CPUMode::X86)? 
ir_var(X86_SF, 31, 0) : ir_var(X64_SF, 63, 0);\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    // test if SF = 1\n    block->add_instr(bblkid, ir_bcc(sf, ir_cst(set,31, 0), ir_cst(dont_set, 31, 0), addr));\n    // do set\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(set, ir_stm(op0, ir_cst(1, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(set, ir_mov(op0, ir_cst(1, op0.size-1, 0), addr));\n    }\n    block->add_instr(set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    // dont set - put zero \n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(dont_set, ir_stm(op0, ir_cst(0, op0.size-1, 0), addr));\n    }else{\n        block->add_instr(dont_set, ir_mov(op0, ir_cst(0, op0.size-1, 0), addr));\n    }\n    block->add_instr(dont_set, ir_bcc(ir_cst(1, 31, 0) , ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_shr_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand op0, op1, dest, pc, tmp0, cf, tmp1, tmp2, tmp3, tmp4, tmp5, of;\n    IRBasicBlockId set_of, cont, end;\n    \n    cont = block->new_bblock();\n    set_of = block->new_bblock();\n    end = block->new_bblock();\n    \n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    cf = (mode == CPUMode::X86)? 
ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    \n    // 5 bits for <= 32bits, 6 bits for 64bits\n    unsigned int mask = (op0.size == 64 )? 0b111111 : 0b11111;\n\n    /* Mask the number of rotations */\n    tmp0 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(bblkid, ir_and(tmp0, x86_arg_extract(op1, op0.size-1, 0), ir_cst(mask, op0.size-1, 0), addr)); \n    \n    /* Test if masked count is 0 */\n    block->add_instr(bblkid, ir_bcc(tmp0, ir_cst(cont, 31, 0), ir_cst(end, 31, 0), addr));\n\n    // Do shift\n    /* Affect CF (last bit shifted out) */\n    tmp1 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    tmp4 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(cont, ir_sub(tmp1, tmp0, ir_cst(1, tmp0.size-1, 0), addr)); // Num of the last bit that'll be shifted out\n    block->add_instr(cont, ir_shr(tmp4, op0, tmp1, addr));\n    block->add_instr(cont, ir_mov(x86_arg_extract(cf, 0, 0), x86_arg_extract(tmp4, 0, 0), addr));\n\n    /* Shift */\n    tmp2 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(cont, ir_shr(tmp2, op0, tmp0, addr));\n    /* Save op0 */\n    tmp5 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(cont, ir_mov(tmp5, op0, addr));\n    // Assign res\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(cont, ir_stm(dest, tmp2, addr));\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, cont, tmp_var_count, dest, tmp2);\n    }\n\n    /* Affect OF flag iff masked count == 1 */\n    tmp3 = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(cont, ir_xor(tmp3, tmp0, ir_cst(1, tmp0.size-1, 0), addr));\n    block->add_instr(cont, ir_bcc(tmp3, ir_cst(end, 31, 0), ir_cst(set_of, 31, 0), addr));\n    block->add_instr(set_of, ir_mov(x86_arg_extract(of,0,0), x86_arg_extract(op0, op0.size-1, op0.size-1), addr));\n    block->add_instr(set_of, ir_bcc(ir_cst(1, 31, 0), 
ir_cst(end, 31, 0), ir_none(), addr));\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(end, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    bblkid = end;\n    return;\n}\n\ninline void x86_stc_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, cf;\n    \n    /* Get operand */\n    cf = (mode == CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    \n    /* Set flag */\n    block->add_instr(bblkid, ir_mov(cf, ir_cst(1, cf.size-1, 0), addr));\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_std_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, df;\n    \n    /* Get operand */\n    df = (mode == CPUMode::X86)? ir_var(X86_DF, 31, 0) : ir_var(X64_DF, 63, 0);\n    \n    /* Set flag */\n    block->add_instr(bblkid, ir_mov(df, ir_cst(1, df.size-1, 0), addr));\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_sti_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, iflag;\n    \n    /* Get operand */\n    iflag = (mode == CPUMode::X86)? 
ir_var(X86_IF, 31, 0) : ir_var(X64_IF, 63, 0);\n    \n    /* Set flag */\n    block->add_instr(bblkid, ir_mov(iflag, ir_cst(1, iflag.size-1, 0), addr));\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_stosb_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, al, di, df;\n    IRBasicBlockId inc, dec, end;\n    \n    df = (mode == CPUMode::X86)? ir_var(X86_DF, 31, 0) : ir_var(X64_DF, 63, 0);\n    al = (mode == CPUMode::X86)? ir_var(X86_EAX, 7, 0): ir_var(X64_RAX, 7, 0);\n    di = (mode == CPUMode::X86)? ir_var(X86_EDI, 31, 0): ir_var(X64_RDI, 63, 0);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n\n    // Update PC\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    /*  Store byte */\n    block->add_instr(bblkid, ir_stm(di, al, addr));\n    \n    /* Adjust DI */\n    inc = block->new_bblock();\n    dec = block->new_bblock();\n    end = block->new_bblock();\n    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, df.size-1, 0), ir_cst(inc, df.size-1, 0), addr));\n    /* Increment */ \n    block->add_instr(inc, ir_add(di, di, ir_cst(1, di.size-1, 0), addr));\n    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0), ir_cst(end, 31, 0), ir_none(), addr));\n    \n    /* Or decrement */\n    block->add_instr(dec, ir_sub(di, di, ir_cst(1, di.size-1, 0), addr));\n    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0), ir_cst(end, 31, 0), ir_none(), addr));\n\n    bblkid = end;\n    return;\n}\n\ninline void x86_stosd_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, eax, di, df;\n    IRBasicBlockId inc, dec, end;\n    \n    df = (mode == CPUMode::X86)? 
ir_var(X86_DF, 31, 0) : ir_var(X64_DF, 63, 0);\n    eax = (mode == CPUMode::X86)? ir_var(X86_EAX, 31, 0): ir_var(X64_RAX, 31, 0);\n    di = (mode == CPUMode::X86)? ir_var(X86_EDI, 31, 0): ir_var(X64_RDI, 63, 0);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n\n    // Update PC\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    /*  Store byte */\n    block->add_instr(bblkid, ir_stm(di, eax, addr));\n    \n    /* Adjust DI */\n    inc = block->new_bblock();\n    dec = block->new_bblock();\n    end = block->new_bblock();\n    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, df.size-1, 0), ir_cst(inc, df.size-1, 0), addr));\n    /* Increment */ \n    block->add_instr(inc, ir_add(di, di, ir_cst(4, di.size-1, 0), addr));\n    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0), ir_cst(end, 31, 0), ir_none(), addr));\n    /* Or decrement */\n    block->add_instr(dec, ir_sub(di, di, ir_cst(4, di.size-1, 0), addr));\n    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0), ir_cst(end, 31, 0), ir_none(), addr));\n\n    bblkid = end;\n    return;\n}\n\ninline void x86_stosq_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, rax, di, df;\n    IRBasicBlockId inc, dec, end;\n    \n    df = ir_var(X64_DF, 63, 0);\n    rax = ir_var(X64_RAX, 63, 0);\n    di = ir_var(X64_RDI, 63, 0);\n    pc = ir_var(X64_RIP, 63, 0);\n\n    // Update PC\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n\n    /*  Store byte */\n    block->add_instr(bblkid, ir_stm(di, rax, addr));\n    \n    /* Adjust DI */\n    inc = block->new_bblock();\n    dec = block->new_bblock();\n    end = block->new_bblock();\n    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, df.size-1, 0), ir_cst(inc, df.size-1, 0), addr));\n    /* Increment */ \n    block->add_instr(inc, ir_add(di, di, ir_cst(8, di.size-1, 0), addr));\n    
block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0), ir_cst(end, 31, 0), ir_none(), addr));\n    /* Or decrement */\n    block->add_instr(dec, ir_sub(di, di, ir_cst(8, di.size-1, 0), addr));\n    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0), ir_cst(end, 31, 0), ir_none(), addr));\n\n    bblkid = end;\n    return;\n}\n\ninline void x86_stosw_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, ax, di, df;\n    IRBasicBlockId inc, dec, end;\n    \n    df = (mode == CPUMode::X86)? ir_var(X86_DF, 31, 0) : ir_var(X64_DF, 63, 0);\n    ax = (mode == CPUMode::X86)? ir_var(X86_EAX, 15, 0): ir_var(X64_RAX, 15, 0);\n    di = (mode == CPUMode::X86)? ir_var(X86_EDI, 31, 0): ir_var(X64_RDI, 63, 0);\n    pc = (mode == CPUMode::X86)? ir_var(X86_EIP, 31, 0) : ir_var(X64_RIP, 63, 0);\n\n    // Update PC\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    /*  Store byte */\n    block->add_instr(bblkid, ir_stm(di, ax, addr));\n    \n    /* Adjust DI */\n    inc = block->new_bblock();\n    dec = block->new_bblock();\n    end = block->new_bblock();\n    block->add_instr(bblkid, ir_bcc(df, ir_cst(dec, df.size-1, 0), ir_cst(inc, df.size-1, 0), addr));\n    /* Increment */ \n    block->add_instr(inc, ir_add(di, di, ir_cst(2, di.size-1, 0), addr));\n    block->add_instr(inc, ir_bcc(ir_cst(1, 31, 0), ir_cst(end, 31, 0), ir_none(), addr));\n    /* Or decrement */\n    block->add_instr(dec, ir_sub(di, di, ir_cst(2, di.size-1, 0), addr));\n    block->add_instr(dec, ir_bcc(ir_cst(1, 31, 0), ir_cst(end, 31, 0), ir_none(), addr));\n    \n    bblkid = end;\n    return;\n}\n\ninline void x86_sub_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, op0, op1, tmp, dest;\n    \n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op0 = 
x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    // tmp <- op0 - op1\n    tmp = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(bblkid, ir_sub(tmp, op0, op1, addr));\n    \n    // Update flags\n    x86_set_pf( mode, tmp, addr, block, bblkid, tmp_var_count );\n    x86_set_zf( mode, tmp, addr, block, bblkid );\n    x86_set_sf( mode, tmp, addr, block, bblkid );\n    x86_sub_set_of( mode, op0, op1, tmp, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_cf( mode, op0, op1, tmp, addr, block, bblkid, tmp_var_count );\n    x86_sub_set_af( mode, op0, op1, tmp, addr, block, bblkid, tmp_var_count );\n    \n    /* Set dest operand */\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(bblkid, ir_stm(dest, tmp, addr));\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, tmp);\n    }\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n\n    return;\n}\n\ninline void x86_syscall_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, type, next_pc;\n    \n    if( mode == CPUMode::X86 ){\n        throw unsupported_instruction_exception(\"SYSCALL: not supported in X86 mode\");\n    }\n\n    /* Get operands */\n    pc = x86_get_pc(mode);\n    next_pc = ir_tmp(tmp_var_count++, pc.size-1, 0); \n    block->add_instr(bblkid, ir_add(next_pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    type = ir_cst(SYSCALL_X64_SYSCALL, 63, 0);\n    \n    /* Create interrupt */\n    block->add_instr(bblkid, ir_syscall(type, next_pc, addr));\n    return;\n}\n\ninline void x86_sysenter_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& 
tmp_var_count){\n    IROperand pc, type, next_pc;\n    \n    if( mode == CPUMode::X64 ){\n        throw unsupported_instruction_exception(\"SYSENTER: not supported in X64 mode\");\n    }\n    \n    /* Get operands */\n    pc = x86_get_pc(mode);\n    next_pc = ir_tmp(tmp_var_count++, pc.size-1, 0); \n    block->add_instr(bblkid, ir_add(next_pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    type = ir_cst(SYSCALL_X86_SYSENTER, 31, 0);\n    \n    /* Create interrupt */\n    block->add_instr(bblkid, ir_syscall(type, next_pc, addr));\n    return;\n}\n\ninline void x86_test_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand pc, op0, op1, tmp, cf, of;\n\n    /* Get operands */\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    cf = (mode == CPUMode::X86)? ir_var(X86_CF, 31, 0): ir_var(X64_CF, 63, 0);\n    of = (mode == CPUMode::X86)? 
ir_var(X86_OF, 31, 0): ir_var(X64_OF, 63, 0);\n\n    // tmp <- op0 & op1\n    tmp = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(bblkid, ir_and(tmp, op0, op1, addr));\n\n    // Update flags (except AF that is undefined)\n    x86_set_pf( mode, tmp, addr, block, bblkid, tmp_var_count );\n    x86_set_zf( mode, tmp, addr, block, bblkid );\n    x86_set_sf( mode, tmp, addr, block, bblkid );\n    block->add_instr(bblkid, ir_mov(cf, ir_cst(0, cf.size-1, 0), addr));\n    block->add_instr(bblkid, ir_mov(of, ir_cst(0, of.size-1, 0), addr));\n\n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n\n    return;\n}\n\ninline void x86_xadd_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand op0, op1, dest, res, pc, tmp;\n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n\n    /* Do the add */\n    res = ir_tmp(tmp_var_count++, op0.size-1, 0);\n    block->add_instr(bblkid, ir_add(res, op0, op1, addr));\n    \n    /* Update flags */\n    x86_set_zf(mode, res, addr, block, bblkid);\n    x86_add_set_cf(mode, op0, op1, res, addr, block, bblkid, tmp_var_count);\n    x86_add_set_af(mode, op0, op1, res, addr, block, bblkid, tmp_var_count);\n    x86_add_set_of(mode, op0, op1, res, addr, block, bblkid, tmp_var_count);\n    x86_set_sf(mode, res, addr, block, bblkid);\n    x86_set_pf(mode, res, addr, block, bblkid, tmp_var_count);\n    \n    /* Exchange operands */\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        tmp = ir_tmp(tmp_var_count++, dest.size-1, 0);\n        block->add_instr(bblkid, 
ir_mov(tmp, dest, addr)); // In case dest is op1\n        block->add_instr(bblkid, ir_mov(op1, op0, addr));\n        block->add_instr(bblkid, ir_stm(tmp, res, addr));\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, op1, op0);\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, res);\n    }\n\n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_xchg_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n    IROperand op0, op1, dest, pc, tmp, tmp2;\n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    \n    tmp2 = ir_tmp(tmp_var_count++, op1.size-1, 0);\n    block->add_instr(bblkid, ir_mov(tmp2, op1, addr));\n    \n    /* Exchange operands */\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        tmp = ir_tmp(tmp_var_count++, dest.size-1, 0);\n        block->add_instr(bblkid, ir_mov(tmp, dest, addr)); // In case dest is op1\n        block->add_instr(bblkid, ir_mov(op1, op0, addr));\n        block->add_instr(bblkid, ir_stm(tmp, tmp2, addr));\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, op1, op0);\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, tmp2);\n    }\n    \n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\ninline void x86_xor_d(CPUMode mode, cs_insn* instr, addr_t addr, IRBlock* block, IRBasicBlockId& bblkid , int& tmp_var_count){\n 
   IROperand op0, op1, dest, res, of, cf, pc;\n    of = (mode == CPUMode::X86)? ir_var(X86_OF, 31, 0) : ir_var(X64_OF, 63, 0);\n    cf = (mode == CPUMode::X86)? ir_var(X86_CF, 31, 0) : ir_var(X64_CF, 63, 0);\n    /* Get operands */\n    dest = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count);\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        op0 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[0]), block, bblkid, tmp_var_count, true);\n    }else{\n        op0 = dest;\n    }\n    op1 = x86_arg_translate(mode, addr, &(instr->detail->x86.operands[1]), block, bblkid, tmp_var_count, true);\n    /* Do the xor */\n    res = ir_tmp(tmp_var_count++, (instr->detail->x86.operands[0].size*8)-1, 0);\n    block->add_instr(bblkid, ir_xor(res, op0, op1, addr));\n    \n    /* Update flags: SF, ZF, PF */\n    x86_set_zf(mode, res, addr, block, bblkid);\n    x86_set_sf(mode, res, addr, block, bblkid);\n    x86_set_pf(mode, res, addr, block, bblkid, tmp_var_count);\n    /* OF and CF cleared */\n    block->add_instr(bblkid, ir_mov(of, ir_cst(0, of.high, of.low), addr));\n    block->add_instr(bblkid, ir_mov(cf, ir_cst(0, cf.high, cf.low), addr));\n    \n    /* Finally assign the result to the destination */ \n    /* If the add is written in memory */\n    if( instr->detail->x86.operands[0].type == X86_OP_MEM ){\n        block->add_instr(bblkid, ir_stm(dest, res, addr));\n    /* Else direct register assign */\n    }else{\n        x86_adjust_reg_assign(mode, addr, block, bblkid, tmp_var_count, dest, res);\n    }\n\n    // Update PC\n    pc = x86_get_pc(mode);\n    block->add_instr(bblkid, ir_add(pc, pc, ir_cst(instr->size, pc.size-1, 0), addr));\n    \n    return;\n}\n\n/* ==================================== */\n/* Disassembly wrapper \n * \n * If sym is not null, then is_symbolic and is_tainted should not be null.\n * If they are not null, then the disassembler should check for symbolic/tainted \n * code and 
update the booleans accordingly. Disassembly ends immediately if \n * symbolic code is detected.\n * */\nIRBlock* DisassemblerX86::disasm_block(addr_t addr, code_t code, size_t code_size){\n    // Create new ir block\n    IRBlock * block = new IRBlock(\"\", addr);\n    IRBasicBlockId bblkid = block->new_bblock();\n    int tmp_var_count = 0;\n    addr_t curr_addr = addr;\n    bool stop = false;\n    stringstream asm_str;\n\n    while( (!stop) && cs_disasm_iter(_handle, (const uint8_t**)&code, &code_size, &addr, _insn) ){\n        // DEBUG\n        // std::cout << \"DEBUG, dissassembled \" << _insn->mnemonic << \" \" << _insn->op_str << std::endl;\n        \n        // Add instruction to IRBlock\n        switch(_insn->id){\n            case X86_INS_AAA:       x86_aaa_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_AAD:       x86_aad_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_AAM:       x86_aam_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_AAS:       x86_aas_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_ADC:       x86_adc_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_ADCX:      x86_adcx_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_ADD:       x86_add_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_AND:       x86_and_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_ANDN:      x86_andn_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_BLSI:      x86_blsi_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_BLSMSK:    x86_blsmsk_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_BLSR:      
x86_blsr_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_BSF:       x86_bsf_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_BSR:       x86_bsr_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_BSWAP:     x86_bswap_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_BT:        x86_bt_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_BTC:       x86_btc_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_BTR:       x86_btr_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_BTS:       x86_bts_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_BZHI:      x86_bzhi_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CALL:      x86_call_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CBW:       x86_cbw_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CDQ:       x86_cdq_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CDQE:      x86_cdqe_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CLC:       x86_clc_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CLD:       x86_cld_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CLI:       x86_cli_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMC:       x86_cmc_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMOVA:     x86_cmova_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMOVAE:    
x86_cmovae_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMOVB:     x86_cmovb_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMOVBE:    x86_cmovbe_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMOVE:     x86_cmove_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMOVG:     x86_cmovg_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMOVGE:    x86_cmovge_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMOVL:     x86_cmovl_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMOVLE:    x86_cmovle_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMOVNE:    x86_cmovne_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMOVNO:    x86_cmovno_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMOVNP:    x86_cmovnp_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMOVNS:    x86_cmovns_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMOVO:     x86_cmovo_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMOVP:     x86_cmovp_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMOVS:     x86_cmovs_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMP:       x86_cmp_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMPSB:     x86_cmpsb_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMPSD:     x86_cmpsd_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            
case X86_INS_CMPSQ:     x86_cmpsq_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMPSW:     x86_cmpsw_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CMPXCHG:   x86_cmpxchg_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CPUID:     x86_cpuid_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CQO:       x86_cqo_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CWD:       x86_cwd_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_CWDE:      x86_cwde_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_DEC:       x86_dec_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_DIV:       x86_div_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_IDIV:      x86_idiv_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_IMUL:      x86_imul_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_INC:       x86_inc_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_INT:       x86_int_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_INT3:      x86_int3_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JA:        x86_ja_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JAE:       x86_jae_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JB:        x86_jb_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JBE:       x86_jbe_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case 
X86_INS_JCXZ:      x86_jcxz_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JE:        x86_je_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JECXZ:     x86_jecxz_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JG:        x86_jg_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JGE:       x86_jge_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JL:        x86_jl_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JLE:       x86_jle_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JMP:       x86_jmp_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JNE:       x86_jne_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JNO:       x86_jno_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JNP:       x86_jnp_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JNS:       x86_jns_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JO:        x86_jo_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JP:        x86_jp_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JRCXZ:     x86_jrcxz_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_JS:        x86_js_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_LAHF:      x86_lahf_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_LEA:       x86_lea_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_LEAVE:     
x86_leave_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_LODSB:     x86_lodsb_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_LODSD:     x86_lodsd_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_LODSQ:     x86_lodsq_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_LODSW:     x86_lodsw_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_MOV:       x86_mov_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_MOVABS:    x86_mov_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break; // Just mov with 64b imm/mem\n            case X86_INS_MOVSB:     x86_movsb_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_MOVSD:     x86_movsd_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_MOVSQ:     x86_movsq_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_MOVSW:     x86_movsw_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_MOVSX:     x86_movsx_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_MOVSXD:    x86_movsxd_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_MOVZX:     x86_movzx_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_MUL:       x86_mul_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_NEG:       x86_neg_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_NOP:       x86_nop_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_NOT:       x86_not_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n  
          case X86_INS_OR:        x86_or_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_POP:       x86_pop_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_POPAL:     x86_popad_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_PUSH:      x86_push_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_PUSHAL:    x86_pushad_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_RCL:       x86_rcl_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_RCR:       x86_rcr_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_RDTSC:     x86_rdtsc_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_RET:       x86_ret_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_ROL:       x86_rol_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_ROR:       x86_ror_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SAL:       x86_sal_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SAR:       x86_sar_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SCASB:     x86_scasb_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SCASD:     x86_scasd_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SCASQ:     x86_scasq_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SCASW:     x86_scasw_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SETA:      x86_seta_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n      
      case X86_INS_SETAE:     x86_setae_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SETB:      x86_setb_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SETBE:     x86_setbe_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SETE:      x86_sete_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SETG:      x86_setg_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SETGE:     x86_setge_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SETL:      x86_setl_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SETLE:     x86_setle_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SETNE:     x86_setne_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SETNO:     x86_setno_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SETNP:     x86_setnp_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SETNS:     x86_setns_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SETO:      x86_seto_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SETP:      x86_setp_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SETS:      x86_sets_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SHL:       x86_sal_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break; // Same as SAL\n            case X86_INS_SHR:       x86_shr_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_STC:       x86_stc_d(_mode, _insn, curr_addr, block, bblkid, 
tmp_var_count); break;\n            case X86_INS_STD:       x86_std_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_STI:       x86_sti_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_STOSB:     x86_stosb_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_STOSD:     x86_stosd_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_STOSQ:     x86_stosq_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_STOSW:     x86_stosw_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SUB:       x86_sub_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SYSENTER:  x86_sysenter_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_SYSCALL:   x86_syscall_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_TEST:      x86_test_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_XADD:      x86_xadd_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_XCHG:      x86_xchg_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            case X86_INS_XOR:       x86_xor_d(_mode, _insn, curr_addr, block, bblkid, tmp_var_count); break;\n            default: \n                string error_str = QuickFmt() << \"unsupported instruction \" << _insn->mnemonic >> QuickFmt::to_str;\n                throw unsupported_instruction_exception(error_str);\n        }\n        \n        // Update asm_str\n        asm_str << \" \" << _insn->mnemonic << \" \" << _insn->op_str << \";\";\n        \n        // Increment instruction count\n        block->_nb_instr++;\n        \n        // Stop if last operation is a branch operation \n        for( int i = 0; i < 
_insn->detail->groups_count; i++){\n            if(     _insn->detail->groups[i] == X86_GRP_JUMP ||\n                    _insn->detail->groups[i] == X86_GRP_CALL ||\n                    _insn->detail->groups[i] == X86_GRP_RET ||\n                    _insn->detail->groups[i] == X86_GRP_INT ||\n                    _insn->detail->groups[i] == X86_GRP_IRET /*||\n                    _insn->detail->groups[i] == X86_GRP_PRIVILEGE ||\n                    _insn->detail->groups[i] == X86_GRP_BRANCH_RELATIVE */\n            ){\n                stop = true;\n            }\n        }\n        curr_addr = addr;\n    }\n\n    // Check if we stopped for a legit reason or because capstone failed\n    if( !stop ){\n        throw runtime_exception(QuickFmt() << \n                \"DisassemblerX86:disasm_block(): capstone error at addr 0x\" << std::hex << curr_addr\n                >> QuickFmt::to_str );\n    }\n\n    /* Set some infos about the block */\n    block->end_addr = addr;\n    block->raw_size = block->start_addr - addr;\n\n    /* Save number of tmp variables */\n    block->_nb_tmp_vars = tmp_var_count;\n    \n    // Get number of IR instructions\n    block->_nb_instr_ir = 0;\n    for( auto bblk : block->bblocks()){\n        block->_nb_instr_ir += bblk.size();\n    }\n\n    // Set asm_str as name\n    string s = asm_str.str();\n    s = s.substr(1, s.size()-1-1); // Remove last ';'\n    if( s.back() == ' ' )\n        s.pop_back();\n    block->name = s;\n    return block;\n}\n"
  },
  {
    "path": "libropium/arch/disassembler.cpp",
    "content": "#include \"disassembler.hpp\"\n#include <iostream>\n\nDisassembler::~Disassembler(){\n        cs_free(_insn, 1);\n        _insn = nullptr;\n        cs_close(&_handle);\n}\n"
  },
  {
    "path": "libropium/compiler/compiler.cpp",
    "content": "#include \"compiler.hpp\"\n#include \"exception.hpp\"\n#include \"il.hpp\"\n#include <algorithm>\n\n\n/* ============= Compiler Task ============ */\nCompilerTask::CompilerTask(Arch* a):arch(a){}\n\nvoid CompilerTask::add_strategy(StrategyGraph* graph, int max_tries){\n    vector<StrategyGraph*>::iterator g;\n    if( pending_strategies.size() >= max_tries && pending_strategies.front()->size <= graph->size ){\n        // If strategy list is already full with smaller strategies, ignore this one\n        delete graph;\n        return;\n    }\n    for( g = pending_strategies.begin();\n         g != pending_strategies.end() && (*g)->size >= graph->size;\n         g++ ){}\n    pending_strategies.insert(g, graph);\n    \n}\n\nROPChain* CompilerTask::compile(Arch* arch, GadgetDB* db, Constraint* constraint, int nb_tries){\n    StrategyGraph* graph;\n    ROPChain* res = nullptr;\n    nb_tries = 3000;\n\n    // Set sigint handler to catch Ctrl+C\n    set_sigint_handler();\n\n    while( nb_tries-- > 0 && !pending_strategies.empty() && !res){\n        // Check if user entered Ctrl+C\n        if( is_pending_sigint() ){\n            notify_sigint_handled();\n            unset_signint_handler();\n            return nullptr;\n        }\n\n        graph = pending_strategies.back();\n        pending_strategies.pop_back();\n        if( graph->select_gadgets(*db, constraint, arch) ){\n            res = graph->get_ropchain(arch, constraint);\n        }else{\n            // Apply strategy rules to the graph to get new candidate strategies\n            apply_rules_to_graph(graph, nb_tries);\n        }\n        delete graph; graph = nullptr;\n    }\n    \n    // Restore original sigint handler\n    unset_signint_handler();\n\n    return res;\n}\n\n\nvoid CompilerTask::apply_rules_to_graph(StrategyGraph* graph, int max_tries){\n    StrategyGraph* new_graph;\n    vector<StrategyGraph*> new_list;\n\n    // Iterate through all nodes of the graph\n    for( Node& node : 
graph->nodes ){\n        if( node.is_disabled || node.is_indirect )\n            continue; // Skip invalid/removed nodes\n        // Apply strategy rules\n        // Generic transitivity\n        new_graph = graph->copy();\n        if( new_graph->rule_generic_transitivity(node.id)){\n            add_strategy(new_graph, max_tries);\n            new_graph = graph->copy();\n        }\n        // MovCst pop\n        if( new_graph->rule_mov_cst_pop(node.id, arch)){\n            add_strategy(new_graph, max_tries);\n            new_graph = graph->copy();\n        }\n        // Generic adjust_jmp\n        if( new_graph->rule_generic_adjust_jmp(node.id, arch)){\n            add_strategy(new_graph, max_tries);\n            new_graph = graph->copy();\n        }\n        // Adjust load\n        if( new_graph->rule_adjust_load(node.id, arch)){\n            add_strategy(new_graph, max_tries);\n            new_graph = graph->copy();\n        }\n        // Generic src reg transitivity\n        if( new_graph->rule_generic_src_transitivity(node.id)){\n            add_strategy(new_graph, max_tries);\n            new_graph = graph->copy();\n        }\n        // Adjust store\n        if( new_graph->rule_adjust_store(node.id, arch)){\n            add_strategy(new_graph, max_tries);\n            // Put new_graph = graph->copy() when adding more strategies\n        }else{\n            delete new_graph; new_graph = nullptr;\n        }\n    }\n}\n\nvoid CompilerTask::clear(){\n    for( StrategyGraph* g : pending_strategies ){\n        delete g; g = nullptr;\n    }\n    pending_strategies.clear();\n}\n\nCompilerTask::~CompilerTask(){\n    clear();\n}\n\n\n\n/* ============= ROPCompiler ============= */\nROPCompiler::ROPCompiler(Arch* a, GadgetDB *d):arch(a), db(d){}\n\nbool ROPCompiler::is_complex_instr(ILInstruction& instr, ABI abi){\n    if( instr.type == ILInstructionType::SYSCALL )\n        return true;\n    if( instr.type == ILInstructionType::FUNCTION ){\n        if( abi == 
ABI::X64_MS || abi == ABI::X64_SYSTEM_V ){\n            return true;\n        }\n    }\n    return false;\n}\n\nROPChain* ROPCompiler::compile(string program, Constraint* constraint, ABI abi, System system){\n    Constraint* tmp_constraint;\n    ROPChain* res;\n    vector<ILInstruction> final_instr;\n    vector<ILInstruction> instr = parse(program); // This raises il_exception if malformed program\n\n    // Add some general assertions\n    if( !constraint ){\n        tmp_constraint = new Constraint();\n        tmp_constraint->mem_safety.enable_unsafe();\n    }else{\n        tmp_constraint = constraint;\n    }\n    // Add generic constraints\n    tmp_constraint->mem_safety.add_safe_reg(arch->sp()); // Stack pointer is always safe for RW\n    \n\n    if( is_complex_instr(instr[0], abi)){\n        res = process_complex(instr, tmp_constraint, abi, system);\n    }else{\n        res = process_simple(instr, tmp_constraint, abi, system);\n    }\n    \n    if( !constraint ){\n        delete tmp_constraint;\n    }\n\n    return res;\n}\n\nROPChain* ROPCompiler::process_simple(vector<ILInstruction>& ins, Constraint* constraint, ABI abi, System system){\n    CompilerTask task = CompilerTask(arch);\n    ROPChain * res = nullptr, *tmp = nullptr;\n    vector<ILInstruction> instructions;\n    \n    // Preprocess instructions\n    preprocess(instructions, ins, constraint);\n\n    // Compile\n    for( ILInstruction& instr : instructions ){\n        task.clear();\n        il_to_strategy(task.pending_strategies, instr, constraint, abi, system);\n        if( (tmp = task.compile(arch, db, constraint)) != nullptr){\n            if( !res )\n                res = tmp;\n            else{\n                res->add_chain(*tmp);\n                delete tmp; tmp = nullptr;\n            }\n        }else{\n            delete res;\n            return nullptr;\n        }\n    }\n    return res;\n}\n\nROPChain* ROPCompiler::process_complex(vector<ILInstruction>& ins, Constraint* constraint, ABI abi, 
System system){\n    CompilerTask task = CompilerTask(arch);\n    ILInstruction instr =  ins[0];\n\n    if( instr.type == ILInstructionType::SYSCALL ){\n    // Syscalls\n        if( system == System::NONE ){\n            throw compiler_exception(\"Target OS must be specified to compile syscalls\");\n        }\n        if( arch->type == ArchType::X86 ){\n            switch( system ){\n                case System::LINUX: return _compile_x86_linux_syscall(instr, constraint);\n                default: throw compiler_exception(\"Syscalls are not supported for this system on X86\");\n            }\n        }else if( arch->type == ArchType::X64 ){\n            switch( system ){\n                case System::LINUX: return _compile_x64_linux_syscall(instr, constraint);\n                default: throw compiler_exception(\"Syscalls are not supported for this system on X64\");\n            }\n        }else{\n            throw runtime_exception(\"Syscalls are not supported for this architecture\");\n        }\n    }else if( instr.type == ILInstructionType::FUNCTION ){\n    // Functions\n        if( abi == ABI::NONE ){\n            throw compiler_exception(\"ABI must be specified to call functions\");\n        }\n        if( arch->type == ArchType::X86 ){\n            switch( abi ){\n                default: throw compiler_exception(\"This ABI is not supported for X86\");\n            }\n        }else if( arch->type == ArchType::X64 ){\n            switch( abi ){\n                case ABI::X64_SYSTEM_V: return _compile_x64_system_v_call(instr, constraint);\n                case ABI::X64_MS: return _compile_x64_ms_call(instr, constraint);\n                default: throw compiler_exception(\"This ABI is not supported for X64\");\n            }\n        }else{\n            throw runtime_exception(\"Function calls are not supported for this architecture\");\n        }\n    }\n    return nullptr;\n}\n\nbool _is_empty_line(string& s){\n    for( char& c : s ){\n        if( 
!isspace(c))\n            return false;\n    }\n    return true;\n}\n\nvector<ILInstruction> ROPCompiler::parse(string& program){\n    size_t pos;\n    string instr;\n    vector<ILInstruction> res;\n    pos = 0;\n    while( !program.empty() && pos != string::npos){\n        pos = program.find('\\n');\n        instr = program.substr(0, pos); \n        if( !_is_empty_line(instr)){\n            try{\n                ILInstruction ins = ILInstruction(*arch, instr);\n                res.push_back(ins);\n            }catch(il_exception const& e) {\n                throw il_exception(QuickFmt() << \"Invalid query: \" << instr >> QuickFmt::to_str);\n            }\n        }\n        program.erase(0, pos + 1);\n    }\n    return res;\n}\n\n\nbool _permutation_contains(vector<int>& perm1, vector<int>& perm2){\n\n    if( perm1.back() != perm2.back() )\n        return false; // Last = failed one, so if not the same we don't remove the recorded fail\n    \n    // Check perm2 is a prefix of perm1\n    for( auto i = perm2.begin(); i != perm2.end()-1; i++ ){\n        if( std::find(perm1.begin(), perm1.end()-1, *i) == (perm1.end()-1)){\n            return false;\n        }\n    }\n\n    return true;\n}\n\nvoid _record_failed_permutation(list<vector<int>>& failed_perms, vector<int>& perm){\n    failed_perms.remove_if(\n        [&perm](vector<int>& failed_perm){\n            return _permutation_contains(failed_perm, perm);\n        });\n    failed_perms.push_back(perm);\n}\n\nbool _is_failed_permutation(list<vector<int>>& failed_perms, vector<int>& perm){\n    for( auto failed_perm : failed_perms ){\n        if( _permutation_contains(perm, failed_perm)){\n            return true;\n        }\n    }\n    return false;\n}\n\n\nROPChain* ROPCompiler::_set_registers_permutation( vector<ILInstruction>& instr, vector<int>& permutation, Constraint* constraint, list<vector<int>>& failed_perms, bool& failed_on_first){\n    CompilerTask task(arch);\n    Constraint constr, tmp_constr;\n    
ROPChain *res = nullptr, *chain=nullptr;\n    vector<int> tmp_perm;\n    vector<int> tmp_keep_regs;\n        \n    constr = *constraint; // Copy base constraint\n\n    for( int i = 0; i < permutation.size(); i++ ){\n        int idx = permutation[i];\n        tmp_perm.push_back(idx);\n        task.clear();\n        il_to_strategy(task.pending_strategies, instr[idx], &constr);\n        // Add the register args to the constr (don't modify them)\n        tmp_constr = constr;\n        tmp_keep_regs.clear();\n        for( int j = i+1; j < permutation.size(); j++ ){\n            if( instr[j].type == ILInstructionType::MOV_REG ){\n                tmp_constr.keep_regs.add_keep_reg(instr[j].args[PARAM_MOVREG_SRC_REG]);\n                tmp_keep_regs.push_back(instr[j].args[PARAM_MOVREG_SRC_REG]);\n            }\n        }\n        chain = task.compile(arch, db, &tmp_constr);\n        if( chain == nullptr ){\n            if( i == 0 && tmp_keep_regs.empty()){\n                // No chain to set the first register\n                failed_on_first = true;\n                return nullptr;\n            }\n            \n            // Add the additional future keep reg to tmp_perm when recording the fail\n            for( int keep : tmp_keep_regs ){\n                if( std::find(tmp_perm.begin(), tmp_perm.end(), keep) == tmp_perm.end())\n                    tmp_perm.push_back(keep);\n            }\n            // Record fail and return\n            _record_failed_permutation(failed_perms, tmp_perm);\n            delete res;\n            return nullptr;\n        }else{\n            // Add it to res\n            if( res == nullptr )\n                res = chain;\n            else\n                res->add_chain(*chain);\n            // Do not modify this dest reg later on\n            constr.keep_regs.add_keep_reg(instr[idx].args[0]); // Add the dst reg to keepregs\n        }\n    }\n    return res;\n}\n \n\n \nROPChain* ROPCompiler::_set_multiple_registers(vector<ILInstruction>& 
instr, Constraint* constraint){
    ROPChain* res = nullptr;
    vector<int> order;
    list<vector<int>> failed_permutations;
    bool failed_on_first = false;
    // Start from the identity ordering 0..n-1
    for( int i = 0; i < instr.size(); i++)
        order.push_back(i);

    // Try every ordering of the register-setting instructions, pruning
    // orderings whose failing prefix has already been recorded
    do{
        if( !_is_failed_permutation(failed_permutations, order)){
            res = _set_registers_permutation(instr, order, constraint, failed_permutations, failed_on_first);
            if( res ){
                break; // Found chain, stop searching
            }else if( failed_on_first ){
                // Even the first register alone can't be set: no ordering can work
                return nullptr;
            }
        }
    }while( std::next_permutation(order.begin(), order.end()));

    return res;
}




// Build the strategy graph for an x86 cdecl function call.
// cdecl is caller-cleanup: the fake return address must point to a
// ret-like gadget that skips the pushed arguments so the ropchain can
// continue after the function returns.
bool ROPCompiler::_x86_cdecl_to_strategy(StrategyGraph& graph, ILInstruction& instr){
    // Arguments pushed on the stack right to left
    // Caller-cleanup = we have to set a proper gadget as return address to go to 
    // the next gadget in the ropchain
    node_t n_ret = graph.new_node(GadgetType::LOAD);
    node_t n = graph.new_node(GadgetType::MOV_CST);
    Node& node_ret = graph.nodes[n_ret];
    Node& node = graph.nodes[n];
    // Add the 'ret' gadget that will resume the ropchain after the call
    node_ret.is_indirect = true; // Indirect
    node_ret.params[PARAM_LOAD_DST_REG].make_reg(X86_EIP);
    node_ret.params[PARAM_LOAD_SRC_ADDR_REG].make_reg(X86_ESP);
    // For return gadget, skip all the arguments and return 
    // (nb args is args.size() -1 because first arg is the function address)
    node_ret.params[PARAM_LOAD_SRC_ADDR_OFFSET].make_cst(arch->octets*(instr.args.size()-1), graph.new_name("stack_offset"));    

    // Main node: jump to the function address
    /* Arguments are on the stack, pushed right to left */
    
    
    node.params[PARAM_MOVCST_DST_REG].make_reg(X86_EIP);
    node.params[PARAM_MOVCST_SRC_CST].make_cst(instr.args[PARAM_FUNCTION_ADDR], graph.new_name("function_address"));
    // Add parameters at the final sp_inc of the gadget
    for( int i = 1; i < instr.args.size(); i++){
        node.special_paddings.push_back(ROPPadding());
        // The offset is sp_inc + arch_size_bytes*(param_num+1) (+1 because return address comes before args)
        node.special_paddings.back().offset.make_cst(
            node.id, PARAM_MOVCST_GADGET_SP_INC,
            exprvar(arch->bits, node.params[PARAM_MOVCST_GADGET_SP_INC].name) + (arch->octets * ((i-1)+1)),
            graph.new_name("func_arg_offset")
        );
        if( instr.args_type[i] == IL_FUNC_ARG_CST ){
            node.special_paddings.back().value.make_cst(instr.args[i], graph.new_name("func_arg"));
        }else{
            // Putting the registers on the stack then call a function isn't supported
            return false;
        }
    }
    // Add constraint to check that the sp-delta of the gadget is 0
    node.assigned_gadget_constraints.push_back(
        // The gadget should have a sp_delta == 0 (otherwise the arguments won't be in the right place when
        // jumping to the function)
        [](Node* n, StrategyGraph* g, Arch* arch)->bool{
            return n->affected_gadget->max_sp_inc == n->affected_gadget->sp_inc;
        }
    );

    // Add the 'ret' gadget address as first padding of the first gadget :)
    node.special_paddings.push_back(ROPPadding());
    node.special_paddings.back().offset.make_cst(
            node.id, PARAM_MOVCST_GADGET_SP_INC,
            exprvar(arch->bits, node.params[PARAM_MOVCST_GADGET_SP_INC].name),
            graph.new_name("func_ret_addr_offset")
        );
    node.special_paddings.back().value.make_cst(node_ret.id, PARAM_LOAD_GADGET_ADDR,
        exprvar(arch->bits, node_ret.params[PARAM_LOAD_GADGET_ADDR].name), graph.new_name("func_ret_addr"));

    // Add mandatory following node
    node.mandatory_following_node = node_ret.id;

    return true;
}

// Build the strategy graph for an x86 stdcall function call.
// Similar to cdecl but easier since it's a callee-cleanup convention so we just need
// a 'ret' as return gadget and don't need to adapt it to the number of arguments
bool ROPCompiler::_x86_stdcall_to_strategy(StrategyGraph& graph, ILInstruction& instr){
    node_t n_ret = graph.new_node(GadgetType::LOAD);
    node_t n = graph.new_node(GadgetType::MOV_CST);
    Node& node_ret = graph.nodes[n_ret];
    Node& node = graph.nodes[n];
    // Add the 'ret' gadget (offset 0: the callee already cleaned the arguments)
    node_ret.is_indirect = true; // Indirect
    node_ret.params[PARAM_LOAD_DST_REG].make_reg(X86_EIP);
    node_ret.params[PARAM_LOAD_SRC_ADDR_REG].make_reg(X86_ESP);
    node_ret.params[PARAM_LOAD_SRC_ADDR_OFFSET].make_cst(0, graph.new_name("stack_offset"));    

    // Main node: jump to the function address
    /* Arguments are on the stack, pushed right to left */
    node.params[PARAM_MOVCST_DST_REG].make_reg(X86_EIP);
    node.params[PARAM_MOVCST_SRC_CST].make_cst(instr.args[PARAM_FUNCTION_ADDR], graph.new_name("func_address"));
    // Add parameters at the final sp_inc of the gadget
    for( int i = 1; i < instr.args.size(); i++){
        node.special_paddings.push_back(ROPPadding());
        // The offset is sp_inc + arch_size_bytes*(param_num+1) (+1 because return address comes before args)
        node.special_paddings.back().offset.make_cst(
            node.id, PARAM_MOVCST_GADGET_SP_INC,
            exprvar(arch->bits, node.params[PARAM_MOVCST_GADGET_SP_INC].name) + (arch->octets * ((i-1)+1)),
            graph.new_name("func_arg_offset")
        );
        if( instr.args_type[i] == IL_FUNC_ARG_CST ){
            node.special_paddings.back().value.make_cst(instr.args[i], graph.new_name("func_arg"));
        }else{
            // Putting the registers on the stack then call a function isn't supported
            return false;
        }
    }
    // Add constraint to check that the sp-delta of the gadget is 0
    node.assigned_gadget_constraints.push_back(
        // The gadget should have a sp_delta == 0 (otherwise the arguments won't be in the right place when
        // jumping to the function)
        [](Node* n, StrategyGraph* g, Arch* arch)->bool{
            return n->affected_gadget->max_sp_inc == n->affected_gadget->sp_inc;
        }
    );

    // Add the 'ret' gadget address as first padding of the first gadget :)
    node.special_paddings.push_back(ROPPadding());
    node.special_paddings.back().offset.make_cst(
            node.id, PARAM_MOVCST_GADGET_SP_INC,
            exprvar(arch->bits, node.params[PARAM_MOVCST_GADGET_SP_INC].name),
            graph.new_name("func_ret_addr_offset")
        );
    node.special_paddings.back().value.make_cst(node_ret.id, PARAM_LOAD_GADGET_ADDR,
        exprvar(arch->bits, node_ret.params[PARAM_LOAD_GADGET_ADDR].name), graph.new_name("func_ret_addr"));

    // Add mandatory following node
    node.mandatory_following_node = node_ret.id;

    return true;
}

// Compile a function call using the x64 System V ABI.
ROPChain* ROPCompiler::_compile_x64_system_v_call(ILInstruction& instr, Constraint* constraint){
    // First 6 args in RDI,RSI,RDX,RCX,R8,R9 then on the stack pushed right to left
    node_t call_node, ret_node;
    int arg_regs[6] = {X64_RDI, X64_RSI, X64_RDX, X64_RCX, X64_R8, X64_R9};
    int nb_args_on_stack;
    CompilerTask task(arch);
    ROPChain *res=nullptr, *tmp=nullptr;
    StrategyGraph *graph = new StrategyGraph();
    vector<ILInstruction> set_regs_instr;
    Constraint call_constr;
    
    // Get base constraint
    call_constr = *constraint; // Constraint for the call node with the keepregs set

    // Create node for strategy graph to call the function (with padding etc)
    if( instr.args.size()-1 > 6 )
        nb_args_on_stack = instr.args.size()-1 - 6;
    else
        nb_args_on_stack = 0;

    // Add the 'ret' gadget (skips the stack-passed arguments, if any)
    ret_node = graph->new_node(GadgetType::LOAD);
    graph->nodes[ret_node].is_indirect = true; // Indirect
    graph->nodes[ret_node].params[PARAM_LOAD_DST_REG].make_reg(X64_RIP);
    graph->nodes[ret_node].params[PARAM_LOAD_SRC_ADDR_REG].make_reg(X64_RSP);
    graph->nodes[ret_node].params[PARAM_LOAD_SRC_ADDR_OFFSET].make_cst(nb_args_on_stack*arch->octets, graph->new_name("stack_offset"));

    // Add the call node
    call_node = graph->new_node(GadgetType::MOV_CST);
    graph->nodes[call_node].params[PARAM_MOVCST_DST_REG].make_reg(X64_RIP);
    graph->nodes[call_node].params[PARAM_MOVCST_SRC_CST].make_cst(instr.args[PARAM_FUNCTION_ADDR], graph->new_name("func_address"));
    // Add constraint to check that the sp-delta of the gadget is 0
    graph->nodes[call_node].assigned_gadget_constraints.push_back(
        // The gadget should have a sp_delta == 0 (otherwise the arguments won't be in the right place when
        // jumping to the function)
        [](Node* n, StrategyGraph* g, Arch* arch)->bool{
            return n->affected_gadget->max_sp_inc == n->affected_gadget->sp_inc;
        }
    );

    // If needed add special paddings for extra args (after the 6th argument)
    for( int i = 0; i < nb_args_on_stack; i++){
        graph->nodes[call_node].special_paddings.push_back(ROPPadding());
        // The offset is sp_inc + arch_size_bytes*(param_num+1) (+1 because return address comes before args)
        graph->nodes[call_node].special_paddings.back().offset.make_cst(
            call_node, PARAM_MOVCST_GADGET_SP_INC,
            exprvar(arch->bits, graph->nodes[call_node].params[PARAM_MOVCST_GADGET_SP_INC].name) + (arch->octets * (i+1)),
            graph->new_name("func_arg_offset")
        );
        if( instr.args_type[PARAM_FUNCTION_ARGS+6+i] == IL_FUNC_ARG_CST ){
            graph->nodes[call_node].special_paddings.back().value.make_cst(instr.args[PARAM_FUNCTION_ARGS+6+i], graph->new_name("func_arg"));
        }else{
            // Putting the registers on the stack then call a function isn't supported
            delete graph;
            return nullptr;
        }
    }
    // Add the 'ret' gadget address as first padding of the first gadget :)
graph->nodes[call_node].special_paddings.push_back(ROPPadding());\n    graph->nodes[call_node].special_paddings.back().offset.make_cst(\n            call_node, PARAM_MOVCST_GADGET_SP_INC,\n            exprvar(arch->bits, graph->nodes[call_node].params[PARAM_MOVCST_GADGET_SP_INC].name),\n            graph->new_name(\"func_ret_addr_offset\")\n        );\n    graph->nodes[call_node].special_paddings.back().value.make_cst(ret_node, PARAM_LOAD_GADGET_ADDR,\n        exprvar(arch->bits, graph->nodes[ret_node].params[PARAM_LOAD_GADGET_ADDR].name), graph->new_name(\"func_ret_addr\"));\n\n    // Add 'ret' node as mandatory following node\n    graph->nodes[call_node].mandatory_following_node = ret_node;\n\n    // Create a vector of instructions to set the register arguments\n    for( int i = 0; i < 6 && (i < instr.args.size()-PARAM_FUNCTION_ARGS); i++){\n        // Set register that must hold the argument\n        if( instr.args_type[PARAM_FUNCTION_ARGS+i] == IL_FUNC_ARG_CST ){\n            set_regs_instr.push_back(ILInstruction(ILInstructionType::MOV_CST));\n            set_regs_instr.back().args = {arg_regs[i], instr.args[PARAM_FUNCTION_ARGS+i]};\n        }else{\n            set_regs_instr.push_back(ILInstruction(ILInstructionType::MOV_REG));\n            set_regs_instr.back().args = {arg_regs[i], instr.args[PARAM_FUNCTION_ARGS+i]};\n        }\n        call_constr.keep_regs.add_keep_reg(arg_regs[i]);\n    }\n\n    \n    // Get the chain that sets the registers\n    res = _set_multiple_registers(set_regs_instr, constraint);\n    if( !res ){\n        delete graph;\n        return nullptr;\n    }\n    \n    // Get the chain that calls the function\n    task.clear();\n    task.pending_strategies.push_back(graph);\n    tmp = task.compile(arch, db, &call_constr);\n\n    if( !tmp ){\n        delete res;\n        res = nullptr;\n        return nullptr;\n    }\n    // Sucess, return chain!\n    res->add_chain(*tmp);\n\n    return res;\n}\n\n\nROPChain* 
ROPCompiler::_compile_x64_ms_call(ILInstruction& instr, Constraint* constraint){\n    //  Similar to system_v but only 4 args passed in RDX,RCX,R8,R9 then on the stack pushed right to left\n    // (Code is almost identical to _x64_system_v_to_strategy, only number of stack regs changes, \n    //  it could be factorized in the future if needed)\n    node_t call_node, ret_node;\n    int arg_regs[4] = {X64_RDX, X64_RCX, X64_R8, X64_R9};\n    int nb_args_on_stack;\n    CompilerTask task(arch);\n    ROPChain *res=nullptr, *tmp=nullptr;\n    StrategyGraph *graph = new StrategyGraph();\n    vector<ILInstruction> set_regs_instr;\n    Constraint call_constr;\n    \n    // Get base constraint\n    call_constr = *constraint; // Constraint for the call node with the keepregs set\n\n    if( instr.args.size()-1 > 4 )\n        nb_args_on_stack = instr.args.size()-1 - 4;\n    else\n        nb_args_on_stack = 0;\n\n    // Add the 'ret' gadget\n    ret_node = graph->new_node(GadgetType::LOAD);\n    graph->nodes[ret_node].is_indirect = true; // Indirect\n    graph->nodes[ret_node].params[PARAM_LOAD_DST_REG].make_reg(X64_RIP);\n    graph->nodes[ret_node].params[PARAM_LOAD_SRC_ADDR_REG].make_reg(X64_RSP);\n    graph->nodes[ret_node].params[PARAM_LOAD_SRC_ADDR_OFFSET].make_cst(nb_args_on_stack*arch->octets, graph->new_name(\"stack_offset\"));\n\n    // Add the call node\n    call_node = graph->new_node(GadgetType::MOV_CST);\n    graph->nodes[call_node].params[PARAM_MOVCST_DST_REG].make_reg(X64_RIP);\n    graph->nodes[call_node].params[PARAM_MOVCST_SRC_CST].make_cst(instr.args[PARAM_FUNCTION_ADDR], graph->new_name(\"func_address\"));\n    // Add constraint to check that the sp-delta of the gadget is 0\n    graph->nodes[call_node].assigned_gadget_constraints.push_back(\n        // The gadget should have a sp_delta == 0 (otherwise the arguments won't be in the right place when\n        // jumping to the function\n        [](Node* n, StrategyGraph* g, Arch* arch)->bool{\n            return 
n->affected_gadget->max_sp_inc == n->affected_gadget->sp_inc;\n        }\n    );\n\n    // If needed add special paddings for extra args (after the 4th argument)\n    for( int i = 0; i < nb_args_on_stack; i++){\n        graph->nodes[call_node].special_paddings.push_back(ROPPadding());\n        // The offset is sp_inc + arch_size_bytes*(param_num+1) (+1 because return address comes before args)\n        graph->nodes[call_node].special_paddings.back().offset.make_cst(\n            call_node, PARAM_MOVCST_GADGET_SP_INC,\n            exprvar(arch->bits, graph->nodes[call_node].params[PARAM_MOVCST_GADGET_SP_INC].name) + (arch->octets * (i+1)),\n            graph->new_name(\"func_arg_offset\")\n        );\n        if( instr.args_type[PARAM_FUNCTION_ARGS+4+i] == IL_FUNC_ARG_CST ){\n            graph->nodes[call_node].special_paddings.back().value.make_cst(instr.args[PARAM_FUNCTION_ARGS+4+i], graph->new_name(\"func_arg\"));\n        }else{\n            // Putting the registers on the stack then call a function isn't supported\n            delete graph;\n            return nullptr;\n        }\n    }\n    // Add the 'ret' gadget address as first padding of the first gadget :)\n    graph->nodes[call_node].special_paddings.push_back(ROPPadding());\n    graph->nodes[call_node].special_paddings.back().offset.make_cst(\n            call_node, PARAM_MOVCST_GADGET_SP_INC,\n            exprvar(arch->bits, graph->nodes[call_node].params[PARAM_MOVCST_GADGET_SP_INC].name),\n            graph->new_name(\"func_ret_addr_offset\")\n        );\n    graph->nodes[call_node].special_paddings.back().value.make_cst(ret_node, PARAM_LOAD_GADGET_ADDR,\n        exprvar(arch->bits, graph->nodes[ret_node].params[PARAM_LOAD_GADGET_ADDR].name), graph->new_name(\"func_ret_addr\"));\n\n    // Add 'ret' node as mandatory following node\n    graph->nodes[call_node].mandatory_following_node = ret_node;\n\n    // Create a vector of instructions to set the register arguments\n    for( int i = 0; i < 4 && (i < 
instr.args.size()-PARAM_FUNCTION_ARGS); i++){\n        // Set register that must hold the argument\n        if( instr.args_type[PARAM_FUNCTION_ARGS+i] == IL_FUNC_ARG_CST ){\n            set_regs_instr.push_back(ILInstruction(ILInstructionType::MOV_CST));\n            set_regs_instr.back().args = {arg_regs[i], instr.args[PARAM_FUNCTION_ARGS+i]};\n        }else{\n            set_regs_instr.push_back(ILInstruction(ILInstructionType::MOV_REG));\n            set_regs_instr.back().args = {arg_regs[i], instr.args[PARAM_FUNCTION_ARGS+i]};\n        }\n        call_constr.keep_regs.add_keep_reg(arg_regs[i]);\n    }\n\n    \n    // Get the chain that sets the registers\n    res = _set_multiple_registers(set_regs_instr, constraint);\n    if( !res ){\n        delete graph;\n        return nullptr;\n    }\n    \n    // Get the chain that calls the function\n    task.clear();\n    task.pending_strategies.push_back(graph);\n    tmp = task.compile(arch, db, &call_constr);\n\n    if( !tmp ){\n        delete res;\n        res = nullptr;\n        return nullptr;\n    }\n    // Sucess, return chain!\n    res->add_chain(*tmp);\n\n    return res;\n}\n\nROPChain* ROPCompiler::_compile_x86_linux_syscall(ILInstruction& instr, Constraint* constraint){\n    // Get syscall def for this syscall\n    SyscallDef* def;\n    bool def_by_name;\n    int syscall_num;\n    CompilerTask task(arch);\n    vector<ILInstruction> set_regs_instr;\n    ROPChain *res, *syscall_chain;\n    Constraint syscall_constraint = *constraint;\n\n    def_by_name = !instr.syscall_name.empty();\n    \n    if( def_by_name ){\n        def = get_syscall_def(ArchType::X64, System::LINUX, instr.syscall_name);\n        if( def == nullptr ){\n            throw compiler_exception(QuickFmt() << \"Syscall '\" << instr.syscall_name << \"' is not supported\");\n        }\n        syscall_num = def->num;\n    }else{\n        syscall_num = instr.syscall_num;\n    }\n\n    int arg_regs[6] = {X86_EBX, X86_ECX, X86_EDX, X86_ESI, X86_EDI, 
X86_EBP};\n\n    if( instr.args.size() > 6 )\n        throw compiler_exception(\"X86 syscalls can not take more than 6 arguments\");\n    else if( def_by_name && instr.args.size() != def->nb_args ){\n        throw compiler_exception(QuickFmt() << \"Syscall \" << def->name << \"() expects \" << std::dec << \n                def->nb_args << \" arguments (got \" << instr.args.size() << \")\" >> QuickFmt::to_str );\n    }\n    \n    \n    // Create vector of instructions to put the first 6 args in registers\n    for( int i = 0; i < 6 && (i < instr.args.size()-PARAM_SYSCALL_ARGS); i++){\n        // Set register that must hold the argument\n        if( instr.args_type[PARAM_SYSCALL_ARGS+i] == IL_FUNC_ARG_CST ){\n            set_regs_instr.push_back(ILInstruction(ILInstructionType::MOV_CST));\n            set_regs_instr.back().args = {arg_regs[i], instr.args[PARAM_SYSCALL_ARGS+i]}; // MOV_CST args\n        }else{\n            set_regs_instr.push_back(ILInstruction(ILInstructionType::MOV_REG));\n            set_regs_instr.back().args = {arg_regs[i], instr.args[PARAM_SYSCALL_ARGS+i]}; // MOV_CST args\n        }\n        syscall_constraint.keep_regs.add_keep_reg(arg_regs[i]);\n    }\n    \n    // Add instruction to put syscall number in eax\n    set_regs_instr.push_back(ILInstruction(ILInstructionType::MOV_CST));\n    set_regs_instr.back().args = {X86_EAX, syscall_num}; // Syscall num in EAX\n\n    // Get the chain to set all the registers\n    res = _set_multiple_registers(set_regs_instr, constraint);\n    if( !res ){\n        return nullptr;\n    }\n\n    // Get the chain to make the syscall\n    syscall_chain = compile(\"syscall\", &syscall_constraint);\n    if( ! 
syscall_chain ){\n        delete res;\n        return nullptr;\n    }else{\n        res->add_chain(*syscall_chain);\n    }\n\n    return res;\n}\n\nROPChain* ROPCompiler::_compile_x64_linux_syscall(ILInstruction& instr, Constraint* constraint){\n    // Get syscall def for this syscall\n    SyscallDef* def;\n    bool def_by_name;\n    int syscall_num;\n    CompilerTask task(arch);\n    vector<ILInstruction> set_regs_instr;\n    ROPChain *res, *syscall_chain;\n    Constraint syscall_constraint = *constraint;\n\n    def_by_name = !instr.syscall_name.empty();\n    \n    if( def_by_name ){\n        def = get_syscall_def(ArchType::X64, System::LINUX, instr.syscall_name);\n        if( def == nullptr ){\n            throw compiler_exception(QuickFmt() << \"Syscall '\" << instr.syscall_name << \"' is not supported\");\n        }\n        syscall_num = def->num;\n    }else{\n        syscall_num = instr.syscall_num;\n    }\n\n    int arg_regs[6] = {X64_RDI, X64_RSI, X64_RDX, X64_R10, X64_R8, X64_R9};\n\n    if( instr.args.size() > 6 )\n        throw compiler_exception(\"X64 syscalls can not take more than 6 arguments\");\n    else if( def_by_name && instr.args.size() != def->nb_args ){\n        throw compiler_exception(QuickFmt() << \"Syscall \" << def->name << \"() expects \" << std::dec << \n                def->nb_args << \" arguments (got \" << instr.args.size() << \")\" >> QuickFmt::to_str );\n    }\n    \n    \n    // Create vector of instructions to put the first 6 args in registers\n    for( int i = 0; i < 6 && (i < instr.args.size()-PARAM_SYSCALL_ARGS); i++){\n        // Set register that must hold the argument\n        if( instr.args_type[PARAM_SYSCALL_ARGS+i] == IL_FUNC_ARG_CST ){\n            set_regs_instr.push_back(ILInstruction(ILInstructionType::MOV_CST));\n            set_regs_instr.back().args = {arg_regs[i], instr.args[PARAM_SYSCALL_ARGS+i]}; // MOV_CST args\n        }else{\n            set_regs_instr.push_back(ILInstruction(ILInstructionType::MOV_REG));\n  
          set_regs_instr.back().args = {arg_regs[i], instr.args[PARAM_SYSCALL_ARGS+i]}; // MOV_CST args\n        }\n        syscall_constraint.keep_regs.add_keep_reg(arg_regs[i]);\n    }\n    \n    // Add instruction to put syscall number in eax\n    set_regs_instr.push_back(ILInstruction(ILInstructionType::MOV_CST));\n    set_regs_instr.back().args = {X64_RAX, syscall_num}; // Syscall num in RAX\n\n    // Get the chain to set all the registers\n    res = _set_multiple_registers(set_regs_instr, constraint);\n    if( !res ){\n        return nullptr;\n    }\n\n    // Get the chain to make the syscall\n    syscall_chain = compile(\"syscall\", &syscall_constraint);\n    if( ! syscall_chain ){\n        delete res;\n        return nullptr;\n    }else{\n        res->add_chain(*syscall_chain);\n    }\n\n    return res;\n}\n\n\nbool _string_to_integers(vector<cst_t>& integers, string& str, int arch_octets, Constraint* constraint){\n    // Assuming little endian\n    int i = 0, j; \n    cst_t val;\n    unsigned char padding_byte = 0xff; // Default\n    \n    if( constraint != nullptr ){\n        try{\n            padding_byte = constraint->bad_bytes.get_valid_byte();\n        }catch(runtime_exception& e){\n            return false;\n        }\n    }\n\n    while( i < str.size() ){\n        val = 0;\n        for( j = 0; j < arch_octets && i < str.size(); j++){\n            val += ((cst_t)str[i++]) << (j*8);\n        }\n        // Check if full value\n        if( j != arch_octets ){\n            // Adjust value\n            for( ; j < arch_octets; j++){\n                val += ((cst_t)padding_byte) << (j*8);\n            }\n        }\n        integers.push_back(val);\n    }\n    return true;\n}\n\nbool _cst_store_cst_to_strategy(StrategyGraph& graph, ILInstruction& instr, Arch* arch){\n    node_t n1 = graph.new_node(GadgetType::STORE);\n    node_t n2 = graph.new_node(GadgetType::MOV_CST);\n    node_t n3 = graph.new_node(GadgetType::MOV_CST);\n    Node& node1 = 
graph.nodes[n1];\n    Node& node2 = graph.nodes[n2];\n    Node& node3 = graph.nodes[n3];\n    node1.branch_type = BranchType::RET;\n    node2.branch_type = BranchType::RET;\n    node3.branch_type = BranchType::RET;\n    // First node is mem(X + C) <- reg\n    // Second is X <- src_cst - C \n    node1.params[PARAM_STORE_SRC_REG].make_reg(-1, false); // Free reg\n    node1.params[PARAM_STORE_DST_ADDR_REG].make_reg(-1, false); // Free\n    node1.params[PARAM_STORE_DST_ADDR_OFFSET].make_cst(-1, graph.new_name(\"offset\"), false);\n    node1.strategy_constraints.push_back(\n        // Can not adjust the addr_reg if it is the same as the reg that must be written\n        // (i.e mov [ecx+8], ecx can't become mov [0x12345678], ecx\n        [](Node* n, StrategyGraph* g, Arch* arch)->bool{\n            return n->params[PARAM_STORE_DST_ADDR_REG].value != n->params[PARAM_STORE_SRC_REG].value;\n        }\n    );\n    node1.node_assertion.valid_pointers.add_valid_pointer(PARAM_STORE_DST_ADDR_REG);\n    \n    node2.params[PARAM_MOVCST_DST_REG].make_reg(n1, PARAM_STORE_DST_ADDR_REG); // node2 X is same as addr reg in node1\n    node2.params[PARAM_MOVCST_DST_REG].is_data_link = true;\n    node2.params[PARAM_MOVCST_SRC_CST].make_cst(n1, PARAM_STORE_DST_ADDR_OFFSET, \n        instr.args[PARAM_CSTSTORECST_DST_ADDR_OFFSET] - exprvar(arch->bits, node1.params[PARAM_STORE_DST_ADDR_OFFSET].name)\n        , graph.new_name(\"cst\")); // node2 cst is the target const C minus the offset in the node1 load\n    \n    node3.params[PARAM_MOVCST_DST_REG].make_reg(n1, PARAM_STORE_SRC_REG);\n    node3.params[PARAM_MOVCST_DST_REG].is_data_link = true;\n    node3.params[PARAM_MOVCST_SRC_CST].make_cst(instr.args[PARAM_CSTSTORECST_SRC_CST], graph.new_name(\"cst\"));\n\n    graph.add_param_edge(n2, n1);\n    graph.add_strategy_edge(n2, n1);\n    graph.add_param_edge(n3, n1);\n    graph.add_strategy_edge(n3, n1);\n    \n    return true;\n}\n\n\nbool _preprocess_cst_store_string(vector<ILInstruction>& dst, 
ILInstruction& instr, Arch* arch, Constraint* constraint){\n    vector<cst_t> integers;\n\n    if( !_string_to_integers(integers, instr.str, arch->octets, constraint)){\n        return false;\n    }\n    // For each integer, add node to store it to the correct address :)\n    for( int i = 0; i < integers.size(); i++ ){\n        cst_t addr_offset = instr.args[PARAM_CSTSTORE_STRING_ADDR_OFFSET] + (i*arch->octets);\n        cst_t src_cst = integers[i];\n        vector<cst_t> store_cst_args = {addr_offset, src_cst};\n        ILInstruction il_instr = ILInstruction(ILInstructionType::CST_STORE_CST, &store_cst_args);\n        dst.push_back(il_instr);\n    }\n    return true;\n}\n\nbool ROPCompiler::preprocess(vector<ILInstruction>& dst, vector<ILInstruction>& src, Constraint* constraint){\n    for( ILInstruction& instr : src ){\n        if( instr.type == ILInstructionType::CST_STORE_STRING ){\n            // Splite a store string into several smaller ones\n            if( ! _preprocess_cst_store_string(dst, instr, arch, constraint))\n                return false;\n        }else{\n            // Just copy it\n            dst.push_back(instr);\n        }\n    }\n    return true;\n}\n\n\nvoid ROPCompiler::il_to_strategy(vector<StrategyGraph*>& graphs, ILInstruction& instr, Constraint* constraint, ABI abi, System system){\n    StrategyGraph* graph;\n    if( instr.type == ILInstructionType::MOV_CST ){\n        // MOV_CST\n        graph = new StrategyGraph();\n        node_t n = graph->new_node(GadgetType::MOV_CST);\n        Node& node = graph->nodes[n];\n        node.branch_type = BranchType::RET;\n        node.params[PARAM_MOVCST_DST_REG].make_reg(instr.args[PARAM_MOVCST_DST_REG]);\n        node.params[PARAM_MOVCST_DST_REG].is_data_link = true;\n        node.params[PARAM_MOVCST_SRC_CST].make_cst(instr.args[PARAM_MOVCST_SRC_CST], graph->new_name(\"cst\"));\n        graph->update_size();\n        graphs.push_back(graph);\n    }else if( instr.type == ILInstructionType::MOV_REG 
){\n        // MOV_REG\n        graph = new StrategyGraph();\n        node_t n = graph->new_node(GadgetType::MOV_REG);\n        Node& node = graph->nodes[n];\n        node.branch_type = BranchType::RET;\n        node.params[PARAM_MOVREG_DST_REG].make_reg(instr.args[PARAM_MOVREG_DST_REG]);\n        node.params[PARAM_MOVREG_DST_REG].is_data_link = true;\n        node.params[PARAM_MOVREG_SRC_REG].make_reg(instr.args[PARAM_MOVREG_SRC_REG]);\n        node.params[PARAM_MOVREG_SRC_REG].is_data_link = true;\n        graph->update_size();\n        graphs.push_back(graph);\n    }else if( instr.type == ILInstructionType::AMOV_CST){\n        // AMOV_CST\n        graph = new StrategyGraph();\n        node_t n = graph->new_node(GadgetType::AMOV_CST);\n        Node& node = graph->nodes[n];\n        node.branch_type = BranchType::RET;\n        node.params[PARAM_AMOVCST_DST_REG].make_reg(instr.args[PARAM_AMOVCST_DST_REG]);\n        node.params[PARAM_AMOVCST_DST_REG].is_data_link = true;\n        node.params[PARAM_AMOVCST_SRC_REG].make_reg(instr.args[PARAM_AMOVCST_SRC_REG]);\n        node.params[PARAM_AMOVCST_SRC_OP].make_op((Op)instr.args[PARAM_AMOVCST_SRC_OP]);\n        node.params[PARAM_AMOVCST_SRC_CST].make_cst(instr.args[PARAM_AMOVCST_SRC_CST], graph->new_name(\"cst\"));\n        graph->update_size();\n        graphs.push_back(graph);\n    }else if( instr.type == ILInstructionType::AMOV_REG){\n        // AMOV_REG\n        graph = new StrategyGraph();\n        node_t n = graph->new_node(GadgetType::AMOV_REG);\n        Node& node = graph->nodes[n];\n        node.branch_type = BranchType::RET;\n        node.params[PARAM_AMOVREG_DST_REG].make_reg(instr.args[PARAM_AMOVREG_DST_REG]);\n        node.params[PARAM_AMOVREG_DST_REG].is_data_link = true;\n        node.params[PARAM_AMOVREG_SRC_REG1].make_reg(instr.args[PARAM_AMOVREG_SRC_REG1]);\n        node.params[PARAM_AMOVREG_SRC_REG1].is_data_link = true;\n        
node.params[PARAM_AMOVREG_SRC_OP].make_op((Op)instr.args[PARAM_AMOVREG_SRC_OP]);\n        node.params[PARAM_AMOVREG_SRC_REG2].make_reg(instr.args[PARAM_AMOVREG_SRC_REG2]);\n        node.params[PARAM_AMOVREG_SRC_REG2].is_data_link = true;\n        graph->update_size();\n        graphs.push_back(graph);\n    }else if( instr.type == ILInstructionType::LOAD ){\n        // LOAD\n        graph = new StrategyGraph();\n        node_t n = graph->new_node(GadgetType::LOAD);\n        Node& node = graph->nodes[n];\n        node.branch_type = BranchType::RET;\n        node.params[PARAM_LOAD_DST_REG].make_reg(instr.args[PARAM_LOAD_DST_REG]);\n        node.params[PARAM_LOAD_DST_REG].is_data_link = true;\n        node.params[PARAM_LOAD_SRC_ADDR_REG].make_reg(instr.args[PARAM_LOAD_SRC_ADDR_REG]);\n        node.params[PARAM_LOAD_SRC_ADDR_REG].is_data_link = true;\n        node.params[PARAM_LOAD_SRC_ADDR_OFFSET].make_cst(instr.args[PARAM_LOAD_SRC_ADDR_OFFSET], graph->new_name(\"offset\"));\n        node.node_assertion.valid_pointers.add_valid_pointer(PARAM_LOAD_SRC_ADDR_REG);\n        graph->update_size();\n        graphs.push_back(graph);\n    }else if( instr.type == ILInstructionType::ALOAD ){\n        // ALOAD\n        graph = new StrategyGraph();\n        node_t n = graph->new_node(GadgetType::ALOAD);\n        Node& node = graph->nodes[n];\n        node.branch_type = BranchType::RET;\n        node.params[PARAM_ALOAD_DST_REG].make_reg(instr.args[PARAM_LOAD_DST_REG]);\n        node.params[PARAM_ALOAD_DST_REG].is_data_link = true;\n        node.params[PARAM_ALOAD_OP].make_op((Op)instr.args[PARAM_ALOAD_OP]);\n        node.params[PARAM_ALOAD_SRC_ADDR_REG].make_reg(instr.args[PARAM_ALOAD_SRC_ADDR_REG]);\n        node.params[PARAM_ALOAD_SRC_ADDR_REG].is_data_link = true;\n        node.params[PARAM_ALOAD_SRC_ADDR_OFFSET].make_cst(instr.args[PARAM_ALOAD_SRC_ADDR_OFFSET], graph->new_name(\"offset\"));\n        
node.node_assertion.valid_pointers.add_valid_pointer(PARAM_ALOAD_SRC_ADDR_REG);\n        graph->update_size();\n        graphs.push_back(graph);\n    }else if( instr.type == ILInstructionType::LOAD_CST ){\n        // LOAD_CST\n        graph = new StrategyGraph();\n        node_t n1 = graph->new_node(GadgetType::LOAD);\n        node_t n2 = graph->new_node(GadgetType::MOV_CST);\n        Node& node1 = graph->nodes[n1];\n        Node& node2 = graph->nodes[n2];\n        node1.branch_type = BranchType::RET;\n        node2.branch_type = BranchType::RET;\n        // First node is reg <- mem(X + C)\n        // Second is X <- src_cst - C \n        node1.params[PARAM_LOAD_DST_REG].make_reg(instr.args[PARAM_LOADCST_DST_REG]);\n        node1.params[PARAM_LOAD_DST_REG].is_data_link = true;\n        node1.params[PARAM_LOAD_SRC_ADDR_REG].make_reg(-1, false); // Free\n        node1.params[PARAM_LOAD_SRC_ADDR_OFFSET].make_cst(-1, graph->new_name(\"offset\"), false);\n        node1.node_assertion.valid_pointers.add_valid_pointer(PARAM_LOAD_SRC_ADDR_REG);\n        \n        node2.params[PARAM_MOVCST_DST_REG].make_reg(n1, PARAM_LOAD_SRC_ADDR_REG); // node2 X is same as addr reg in node1\n        node2.params[PARAM_MOVCST_SRC_CST].make_cst(n1, PARAM_LOAD_SRC_ADDR_OFFSET, \n            instr.args[PARAM_LOADCST_SRC_ADDR_OFFSET] - exprvar(arch->bits, node1.params[PARAM_LOAD_SRC_ADDR_OFFSET].name)\n            , graph->new_name(\"cst\")); // node2 cst is the target const C minus the offset in the node1 load\n        \n        graph->add_param_edge(n2, n1);\n        graph->add_strategy_edge(n2, n1);\n        \n        graph->update_size();\n        graphs.push_back(graph);\n    }else if( instr.type == ILInstructionType::ALOAD_CST ){\n        // ALOAD_CST\n        graph = new StrategyGraph();\n        node_t n1 = graph->new_node(GadgetType::ALOAD);\n        node_t n2 = graph->new_node(GadgetType::MOV_CST);\n        Node& node1 = graph->nodes[n1];\n        Node& node2 = graph->nodes[n2];\n     
   node1.branch_type = BranchType::RET;\n        node2.branch_type = BranchType::RET;\n        // First node is reg Op<- mem(X + C)\n        // Second is X <- src_cst - C \n        node1.params[PARAM_ALOAD_DST_REG].make_reg(instr.args[PARAM_ALOADCST_DST_REG]);\n        node1.params[PARAM_ALOAD_DST_REG].is_data_link = true;\n        node1.params[PARAM_ALOAD_OP].make_op((Op)instr.args[PARAM_ALOADCST_OP]);\n        node1.params[PARAM_ALOAD_SRC_ADDR_REG].make_reg(-1, false); // Free\n        node1.params[PARAM_ALOAD_SRC_ADDR_OFFSET].make_cst(-1, graph->new_name(\"offset\"), false);\n        node1.node_assertion.valid_pointers.add_valid_pointer(PARAM_ALOAD_SRC_ADDR_REG);\n\n        node2.params[PARAM_MOVCST_DST_REG].make_reg(n1, PARAM_ALOAD_SRC_ADDR_REG); // node2 X is same as addr reg in node1\n        node2.params[PARAM_MOVCST_SRC_CST].make_cst(n1, PARAM_ALOAD_SRC_ADDR_OFFSET, \n            instr.args[PARAM_ALOADCST_SRC_ADDR_OFFSET] - exprvar(arch->bits, node1.params[PARAM_ALOAD_SRC_ADDR_OFFSET].name)\n            , graph->new_name(\"cst\")); // node2 cst is the target const C minus the offset in the node1 load\n        \n        graph->add_param_edge(n2, n1);\n        graph->add_strategy_edge(n2, n1);\n        \n        graph->update_size();\n        graphs.push_back(graph);\n    }else if( instr.type == ILInstructionType::STORE ){\n        // STORE\n        graph = new StrategyGraph();\n        node_t n = graph->new_node(GadgetType::STORE);\n        Node& node = graph->nodes[n];\n        node.branch_type = BranchType::RET;\n        node.params[PARAM_STORE_DST_ADDR_REG].make_reg(instr.args[PARAM_STORE_DST_ADDR_REG]);\n        node.params[PARAM_STORE_DST_ADDR_REG].is_data_link = true;\n        node.params[PARAM_STORE_DST_ADDR_OFFSET].make_cst(instr.args[PARAM_STORE_DST_ADDR_OFFSET], graph->new_name(\"offset\"));\n        node.params[PARAM_STORE_SRC_REG].make_reg(instr.args[PARAM_STORE_SRC_REG]);\n        node.params[PARAM_STORE_SRC_REG].is_data_link = true;\n        
node.node_assertion.valid_pointers.add_valid_pointer(PARAM_STORE_DST_ADDR_REG);\n        \n        graph->update_size();\n        graphs.push_back(graph);\n    }else if( instr.type == ILInstructionType::CST_STORE ){\n        // CST_STORE\n        graph = new StrategyGraph();\n        node_t n1 = graph->new_node(GadgetType::STORE);\n        node_t n2 = graph->new_node(GadgetType::MOV_CST);\n        Node& node1 = graph->nodes[n1];\n        Node& node2 = graph->nodes[n2];\n        node1.branch_type = BranchType::RET;\n        node2.branch_type = BranchType::RET;\n        // First node is mem(X + C) <- reg\n        // Second is X <- src_cst - C \n        node1.params[PARAM_STORE_SRC_REG].make_reg(instr.args[PARAM_CSTSTORE_SRC_REG]);\n        node1.params[PARAM_STORE_SRC_REG].is_data_link = true;\n        node1.params[PARAM_STORE_DST_ADDR_REG].make_reg(-1, false); // Free\n        node1.params[PARAM_STORE_DST_ADDR_OFFSET].make_cst(-1, graph->new_name(\"offset\"), false);\n        node1.strategy_constraints.push_back(\n            // Can not adjust the addr_reg if it is the same as the reg that must be written\n            // (i.e mov [ecx+8], ecx can't become mov [0x12345678], ecx\n            [](Node* n, StrategyGraph* g, Arch* arch)->bool{\n                return n->params[PARAM_STORE_DST_ADDR_REG].value != n->params[PARAM_STORE_SRC_REG].value;\n            }\n        );\n        node1.node_assertion.valid_pointers.add_valid_pointer(PARAM_STORE_DST_ADDR_REG);\n        \n        node2.params[PARAM_MOVCST_DST_REG].make_reg(n1, PARAM_STORE_DST_ADDR_REG); // node2 X is same as addr reg in node1\n        node2.params[PARAM_MOVCST_SRC_CST].make_cst(n1, PARAM_STORE_DST_ADDR_OFFSET, \n            instr.args[PARAM_CSTSTORE_DST_ADDR_OFFSET] - exprvar(arch->bits, node1.params[PARAM_STORE_DST_ADDR_OFFSET].name)\n            , graph->new_name(\"cst\")); // node2 cst is the target const C minus the offset in the node1 load\n        \n        graph->add_param_edge(n2, n1);\n        
graph->add_strategy_edge(n2, n1);\n        \n        graph->update_size();\n        graphs.push_back(graph);\n    }else if( instr.type == ILInstructionType::ASTORE ){\n        // ASTORE\n        graph = new StrategyGraph();\n        node_t n = graph->new_node(GadgetType::ASTORE);\n        Node& node = graph->nodes[n];\n        node.branch_type = BranchType::RET;\n        node.params[PARAM_ASTORE_DST_ADDR_REG].make_reg(instr.args[PARAM_ASTORE_DST_ADDR_REG]);\n        node.params[PARAM_ASTORE_DST_ADDR_REG].is_data_link = true;\n        node.params[PARAM_ASTORE_DST_ADDR_OFFSET].make_cst(instr.args[PARAM_ASTORE_DST_ADDR_OFFSET], graph->new_name(\"offset\"));\n        node.params[PARAM_ASTORE_OP].make_op((Op)instr.args[PARAM_ASTORE_OP]);\n        node.params[PARAM_ASTORE_SRC_REG].make_reg(instr.args[PARAM_ASTORE_SRC_REG]);\n        node.params[PARAM_ASTORE_SRC_REG].is_data_link = true;\n        node.node_assertion.valid_pointers.add_valid_pointer(PARAM_ASTORE_DST_ADDR_REG);\n        \n        graph->update_size();\n        graphs.push_back(graph);\n        \n    }else if( instr.type == ILInstructionType::CST_ASTORE ){\n        // CST_ASTORE\n        graph = new StrategyGraph();\n        node_t n1 = graph->new_node(GadgetType::ASTORE);\n        node_t n2 = graph->new_node(GadgetType::MOV_CST);\n        Node& node1 = graph->nodes[n1];\n        Node& node2 = graph->nodes[n2];\n        node1.branch_type = BranchType::RET;\n        node2.branch_type = BranchType::RET;\n        // First node is mem(X + C) op<- reg\n        // Second is X <- src_cst - C\n        node1.params[PARAM_ASTORE_SRC_REG].make_reg(instr.args[PARAM_CSTASTORE_SRC_REG]);\n        node1.params[PARAM_ASTORE_SRC_REG].is_data_link = true;\n        node1.params[PARAM_ASTORE_OP].make_op((Op)instr.args[PARAM_CSTASTORE_OP]);\n        node1.params[PARAM_ASTORE_DST_ADDR_REG].make_reg(-1, false); // Free\n        node1.params[PARAM_ASTORE_DST_ADDR_OFFSET].make_cst(-1, graph->new_name(\"offset\"), false);\n        
node1.strategy_constraints.push_back(\n            // Can not adjust the addr_reg if it is the same as the reg that must be written\n            // (i.e mov [ecx+8], ecx can't become mov [0x12345678], ecx\n            [](Node* n, StrategyGraph* g, Arch * arch)->bool{\n                return n->params[PARAM_ASTORE_DST_ADDR_REG].value != n->params[PARAM_ASTORE_SRC_REG].value;\n            }\n        );\n        node1.node_assertion.valid_pointers.add_valid_pointer(PARAM_ASTORE_DST_ADDR_REG);\n\n        node2.params[PARAM_MOVCST_DST_REG].make_reg(n1, PARAM_ASTORE_DST_ADDR_REG); // node2 X is same as addr reg in node1\n        node2.params[PARAM_MOVCST_SRC_CST].make_cst(n1, PARAM_ASTORE_DST_ADDR_OFFSET, \n            instr.args[PARAM_CSTASTORE_DST_ADDR_OFFSET] - exprvar(arch->bits, node1.params[PARAM_ASTORE_DST_ADDR_OFFSET].name)\n            , graph->new_name(\"cst\")); // node2 cst is the target const C minus the offset in the node1 load\n\n        graph->add_param_edge(n2, n1);\n        graph->add_strategy_edge(n2, n1);\n\n        graph->update_size();\n        graphs.push_back(graph);\n    }else if( instr.type == ILInstructionType::STORE_CST ){\n        // STORE_CST\n        graph = new StrategyGraph();\n        node_t n = graph->new_node(GadgetType::STORE);\n        node_t n1 = graph->new_node(GadgetType::MOV_CST);\n        Node& node = graph->nodes[n];\n        Node& node1 = graph->nodes[n1];\n        node.branch_type = BranchType::RET;\n        node.params[PARAM_STORE_DST_ADDR_REG].make_reg(instr.args[PARAM_STORE_DST_ADDR_REG]);\n        node.params[PARAM_STORE_DST_ADDR_OFFSET].make_cst(instr.args[PARAM_STORE_DST_ADDR_OFFSET], graph->new_name(\"offset\"));\n        node.params[PARAM_STORE_SRC_REG].make_reg(-1, false); // Free reg\n        node.node_assertion.valid_pointers.add_valid_pointer(PARAM_STORE_DST_ADDR_REG);\n        \n        node1.branch_type = BranchType::RET;\n        node1.params[PARAM_MOVCST_DST_REG].make_reg(node.id, PARAM_STORE_SRC_REG);\n       
 node1.params[PARAM_MOVCST_DST_REG].is_data_link = true;\n        node1.params[PARAM_MOVCST_SRC_CST].make_cst(instr.args[PARAM_STORECST_SRC_CST], graph->new_name(\"cst\"));\n        \n        graph->add_strategy_edge(node1.id, node.id);\n        graph->add_param_edge(node1.id, node.id);\n        \n        graph->update_size();\n        graphs.push_back(graph);\n    }else if( instr.type == ILInstructionType::CST_STORE_CST ){\n        // CST_STORE_CST\n        graph = new StrategyGraph();\n        if( _cst_store_cst_to_strategy(*graph, instr, arch)){\n            graph->update_size();\n            graphs.push_back(graph);\n        }\n    }else if( instr.type == ILInstructionType::ASTORE_CST ){\n        // ASTORE_CST\n        graph = new StrategyGraph();\n        node_t n = graph->new_node(GadgetType::ASTORE);\n        node_t n1 = graph->new_node(GadgetType::MOV_CST);\n        Node& node = graph->nodes[n];\n        Node& node1 = graph->nodes[n1];\n        node.branch_type = BranchType::RET;\n        node.params[PARAM_ASTORE_DST_ADDR_REG].make_reg(instr.args[PARAM_ASTORE_DST_ADDR_REG]);\n        node.params[PARAM_ASTORE_DST_ADDR_REG].is_data_link = true;\n        node.params[PARAM_ASTORE_DST_ADDR_OFFSET].make_cst(instr.args[PARAM_ASTORE_DST_ADDR_OFFSET], graph->new_name(\"offset\"));\n        node.params[PARAM_ASTORE_SRC_REG].make_reg(-1, false); // Free reg\n        node.params[PARAM_ASTORE_OP].make_op((Op)instr.args[PARAM_ASTORECST_OP]);\n        node.node_assertion.valid_pointers.add_valid_pointer(PARAM_ASTORE_DST_ADDR_REG);\n        \n        node1.branch_type = BranchType::RET;\n        node1.params[PARAM_MOVCST_DST_REG].make_reg(node.id, PARAM_ASTORE_SRC_REG);\n        node1.params[PARAM_MOVCST_DST_REG].is_data_link = true;\n        node1.params[PARAM_MOVCST_SRC_CST].make_cst(instr.args[PARAM_ASTORECST_SRC_CST], graph->new_name(\"cst\"));\n        \n        graph->add_strategy_edge(node1.id, node.id);\n        graph->add_param_edge(node1.id, node.id);\n        \n 
       graph->update_size();\n        graphs.push_back(graph);\n    }else if( instr.type == ILInstructionType::CST_ASTORE_CST ){\n        // CST_ASTORE_CST\n        graph = new StrategyGraph();\n        node_t n1 = graph->new_node(GadgetType::ASTORE);\n        node_t n2 = graph->new_node(GadgetType::MOV_CST);\n        node_t n3 = graph->new_node(GadgetType::MOV_CST);\n        Node& node1 = graph->nodes[n1];\n        Node& node2 = graph->nodes[n2];\n        Node& node3 = graph->nodes[n3];\n        node1.branch_type = BranchType::RET;\n        node2.branch_type = BranchType::RET;\n        node3.branch_type = BranchType::RET;\n        // First node is mem(X + C) <- reg\n        // Second is X <- src_cst - C \n        node1.params[PARAM_ASTORE_OP].make_op((Op)instr.args[PARAM_CSTASTORECST_OP]);\n        node1.params[PARAM_ASTORE_SRC_REG].make_reg(-1, false); // Free reg\n        node1.params[PARAM_ASTORE_DST_ADDR_REG].make_reg(-1, false); // Free reg\n        node1.params[PARAM_ASTORE_DST_ADDR_OFFSET].make_cst(-1, graph->new_name(\"offset\"), false); // Free offset also\n        node1.strategy_constraints.push_back(\n            // Can not adjust the addr_reg if it is the same as the reg that must be written\n            // (i.e mov [ecx+8], ecx can't become mov [0x12345678], ecx\n            [](Node* n, StrategyGraph* g, Arch* arch)->bool{\n                return n->params[PARAM_ASTORE_DST_ADDR_REG].value != n->params[PARAM_ASTORE_SRC_REG].value;\n            }\n        );\n        node1.node_assertion.valid_pointers.add_valid_pointer(PARAM_ASTORE_DST_ADDR_REG);\n        \n        node2.params[PARAM_MOVCST_DST_REG].make_reg(n1, PARAM_ASTORE_DST_ADDR_REG); // node2 X is same as addr reg in node1\n        node2.params[PARAM_MOVCST_DST_REG].is_data_link = true;\n        node2.params[PARAM_MOVCST_SRC_CST].make_cst(n1, PARAM_ASTORE_DST_ADDR_OFFSET, \n            instr.args[PARAM_CSTASTORECST_DST_ADDR_OFFSET] - exprvar(arch->bits, 
node1.params[PARAM_ASTORE_DST_ADDR_OFFSET].name)\n            , graph->new_name(\"cst\")); // node2 cst is the target const C minus the offset in the node1 load\n        \n        node3.params[PARAM_MOVCST_DST_REG].make_reg(n1, PARAM_ASTORE_SRC_REG);\n        node3.params[PARAM_MOVCST_DST_REG].is_data_link = true;\n        node3.params[PARAM_MOVCST_SRC_CST].make_cst(instr.args[PARAM_CSTASTORECST_SRC_CST], graph->new_name(\"cst\"));\n\n        graph->add_param_edge(n2, n1);\n        graph->add_strategy_edge(n2, n1);\n        graph->add_param_edge(n3, n1);\n        graph->add_strategy_edge(n3, n1);\n\n        graph->update_size();\n        graphs.push_back(graph);\n    }else if( instr.type == ILInstructionType::FUNCTION ){\n        graph = new StrategyGraph();\n        bool success = false;\n        switch( abi ){\n            case ABI::X86_CDECL: success = _x86_cdecl_to_strategy(*graph, instr); break;\n            case ABI::X86_STDCALL: success = _x86_stdcall_to_strategy(*graph, instr); break;\n            case ABI::NONE: throw compiler_exception(\"You have to specify which ABI to use to call functions\");\n            default:\n                throw compiler_exception(\"il_instruction_to_strategy(): Unsupported ABI for function call\");\n        }\n        if( !success ){\n            throw compiler_exception(\"Couldn't translate function call into a chaining strategy\");\n        }\n\n        graph->update_size();\n        graphs.push_back(graph);\n    }else if( instr.type == ILInstructionType::SINGLE_SYSCALL ){\n        graph = new StrategyGraph();\n        node_t n = graph->new_node(GadgetType::SYSCALL);\n        graph->nodes[n].branch_type = BranchType::ANY;\n        graph->update_size();\n        graphs.push_back(graph);\n    }else{\n        throw runtime_exception(\"il_instruction_to_strategy(): unsupported ILInstructionType\");\n    }\n}\n"
  },
  {
    "path": "libropium/compiler/il.cpp",
    "content": "#include \"il.hpp\"\n#include <string>\n#include <cctype>\n#include \"exception.hpp\"\n#include <iostream>\n\nusing std::string;\n\n/* ======= Parse IL Instructions ========== */\n\nvoid _skip_whitespace(string& str, int& idx){\n    while( isspace(str[idx]) && str[idx] != '\\n' && idx < str.size()){\n        idx++;\n    }\n}\n\nbool _parse_end(string& str, int& idx){\n    while( idx < str.size() ){\n        if( !isspace(str[idx]) )\n            return false;\n        idx++;\n    }\n    return true;\n}\n\nbool _parse_il_cst(Arch& arch, vector<cst_t>& args, string& str, int& idx){\n    string s;\n    int i;\n    int base = 10;\n    ucst_t cst;\n    cst_t mult;\n\n    _skip_whitespace(str, idx);\n    // Check if sign - in front of constant\n    if( str[idx] == '-' ){\n        mult = -1;\n        idx++;\n    }else\n        mult = 1;\n\n    _skip_whitespace(str, idx);\n    // Check if hexa \n    if( str.substr(idx, 2) == \"0x\"){\n        idx += 2;\n        s = \"0x\";\n        base = 16;\n    }\n\n    i = idx;\n    if( base == 10 ){\n        while( i < str.size() && isdigit(str[i])){\n            s += str[i++];\n        }\n    }else if( base == 16 ){\n        while( i < str.size() && isxdigit(str[i])){\n            s += str[i++];\n        }\n    }\n    \n    try{\n        cst = std::stoull(s, 0, base);\n        // Check if cst is not too big\n        if( arch.octets < 8 && (cst >= (ucst_t)((ucst_t)1<<(arch.bits)))){\n            return false;\n        }\n        idx = i;\n        args.push_back(cst * mult);\n        return true;\n    }catch(std::invalid_argument const& e){\n        return false;\n    }catch(std::out_of_range const& e){\n        return false;\n    }\n}\n\nbool _parse_il_reg(Arch& arch, vector<cst_t>& args, string& str, int& idx){\n    string s;\n    string prev;\n    int i, prev_i;\n    bool found = false;\n    \n    _skip_whitespace(str, idx);\n    i = idx;\n    \n    while( i < str.size() && !isspace(str[i])){\n        s += 
str[i++];\n\n        if( arch.is_valid_reg(s) ){\n            found = true;\n            prev = s;\n            prev_i = i;\n        }else if( found ){\n            break;\n        }\n    }\n    if( found ){\n        args.push_back(arch.reg_num(prev));\n        idx = prev_i;\n        return true;        \n    }else\n        return false;\n}\n\nbool _parse_il_string( string& res, string& str, int& idx){\n    int i = idx;\n    string s = \"\";\n    char delimiter;\n\n    _skip_whitespace(str, i);\n    \n    // Check if starts with strin delimiter \" or '\n    if( str.size() - i < 2 )\n        return false;\n    if( str[i] == '\\'' ){\n        delimiter = '\\'';\n    }else if( str[i] == '\"'){\n        delimiter = '\"';\n    }else{\n        return false;\n    }\n    // Get string\n    i++;\n    while( i < str.size() && str[i] != delimiter){\n        if( str[i] == '\\\\' ){\n            // Escape sequence\n            // Escapes '\\' ? \n            if( i+1 >= str.size() ){\n                return false;\n            }else if( str[i+1] == '\\\\' ){\n                s += '\\\\';\n                i += 2;\n            }\n            // Escapes delimiter ?\n            else if( str[i+1] == delimiter ){\n                s += delimiter;\n                i += 2;\n            }\n            // Escapes a hex byte ? 
'\\x...'\n            else if( i+3 >= str.size() ){\n                return false;\n            }else if( str[i+1] == 'x' && isxdigit(str[i+2]) && isxdigit(str[i+3])){\n                unsigned int byte = std::stoul(str.substr(i+2, 2), nullptr, 16);\n                s += (char)byte;\n                i += 4;\n            }else{\n                return false;\n            }\n        }else{\n            // Normal char\n            s += str[i++];\n        }\n    }\n    // Check and return\n    if( i == str.size() )\n        return false; // No end delimiter found\n    else if( s.empty() ){\n        return false; // Empty string not allowed (use \"\\x00\" instead\n    }else{\n        res = s;\n        idx = ++i; // Increment i to make it point after the last delimiter\n        return true;\n    }\n}\n\nbool _parse_il_affect(string& str, int& idx){\n    _skip_whitespace(str, idx);\n    if( idx >= str.size())\n        return false;\n    if( str[idx] == '=' ){\n        idx++;\n        return true;\n    }else\n        return false;\n}\n\nbool _parse_il_mem_start(string& str, int& idx){\n    _skip_whitespace(str, idx);\n    if( idx > str.size()-1)\n        return false;\n    if( str.substr(idx, 1) == \"[\" ){\n        idx+=1;\n        return true;\n    }else\n        return false;\n}\n\nbool _parse_il_mem_end(string& str, int& idx){\n    _skip_whitespace(str, idx);\n    if( idx >= str.size())\n        return false;\n    if( str[idx] == ']' ){\n        idx++;\n        return true;\n    }else\n        return false;\n}\n\nbool _parse_il_function_args_list(Arch& arch, vector<cst_t>& args, vector<int>& args_type, string& str, int& i){\n    _skip_whitespace(str, i);\n    // Parse first argument if any\n    if( _parse_il_cst(arch, args, str, i)){\n        args_type.push_back(IL_FUNC_ARG_CST);\n    }else if( _parse_il_reg(arch, args, str, i)){\n        args_type.push_back(IL_FUNC_ARG_REG);\n    }\n    \n    // Go to next char\n    _skip_whitespace(str, i);\n    // If coma, next 
argument is expected\n    if( str[i] == ','){\n        return _parse_il_function_args_list(arch, args, args_type, str, ++i);\n    // Else just return true\n    }else{\n        return true;\n    }\n}\n\n// (arg1, arg2, arg3, ... )\n// or just ()\nbool _parse_il_function_args(Arch& arch, vector<cst_t>& args, vector<int>& args_type, string& str, int& idx){\n    int i = idx;\n    _skip_whitespace(str, i);\n    if( i >= str.size())\n        return false;\n    if( str[i] != '(' )\n        return false;\n    i++;\n    // Parse args list\n    if( _parse_il_function_args_list(arch, args, args_type, str, i)){\n        _skip_whitespace(str, i);\n        if( str[i] == ')' ){\n            idx = i+1;\n            return true;\n        }else\n            return false;\n    }else{\n        return false;\n    }\n}\n\nbool _parse_il_syscall_name( string& name, string& str, int& idx){\n    int i = idx;\n    string s = \"\";\n\n    _skip_whitespace(str, i);\n    \n    // Check if starts with sys_\n    if( str.size() - i < 4 )\n        return false;\n    if( str.substr(i, 4) != \"sys_\" ){\n        return false;\n    }else{\n        i += 4;\n    }\n    // Get name\n    while( i < str.size() && (isalpha(str[i]) || isdigit(str[i]))){\n        s += str[i++];\n    }\n    // Check and return\n    if( s.empty() )\n        return false; // No empty syscall name allowed\n    else{\n        name = s;\n        idx = i;\n        return true;\n    }\n}\n\nbool _parse_il_single_syscall(Arch& arch, ILInstruction* instr, string& str){\n    int i = 0;\n    _skip_whitespace(str, i);\n    // Check if starts with sys_\n    if( str.size() - i < 7 )\n        return false;\n    if( str.substr(i, 7) != \"syscall\" ){\n        return false;\n    }else{\n        i += 7;\n    }\n    if( _parse_end(str, i)){\n        instr->type = ILInstructionType::SINGLE_SYSCALL;\n        return true;\n    }else{\n        return false;\n    }\n}\n\nbool _parse_il_syscall_num( Arch& arch, int& num, string& str, int& idx){\n    
int i = idx;\n    vector<cst_t> args;\n\n    _skip_whitespace(str, i);\n\n    // Check if starts with sys_\n    if( str.size() - i < 4 )\n        return false;\n    if( str.substr(i, 4) != \"sys_\" ){\n        return false;\n    }else{\n        i += 4;\n    }\n    // Get num\n    if( ! _parse_il_cst(arch, args, str, i)){\n        return false;\n    }else{\n        num = args[0];\n        idx = i;\n        return true;\n    }\n}\n\n\nbool _parse_il_reg_and_offset(Arch& arch, vector<cst_t>& args, string& str, int& idx){\n    cst_t mult;\n    // Get reg\n    _skip_whitespace(str, idx);\n    \n    if( !_parse_il_reg(arch, args, str, idx) ){\n        return false;\n    }\n\n    // Parse op (+ or - )\n    _skip_whitespace(str, idx);\n    if( str[idx] == '+' ){\n        mult = 1;\n        idx++;\n    }else if( str[idx] == '-' ){\n        mult = -1;\n        idx++;\n    }else{\n        args.push_back(0); // Push null offset\n        return true;\n    }\n    // Parse offset\n    _skip_whitespace(str, idx);\n    \n    \n    if( _parse_il_cst(arch, args, str, idx)){\n        args.back() = args.back() * mult; // Adjust const\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_binop(vector<cst_t>& args, string& str, int& idx){\n    Op res = Op::NONE;\n    \n    _skip_whitespace(str, idx);\n    if( idx == str.size())\n        return false;\n    switch( str[idx] ){\n        case '+': idx++; res =  Op::ADD; break;\n        case '/': idx++; res =  Op::DIV; break;\n        case '*': idx++; res =  Op::MUL; break;\n        case '^': idx++; res =  Op::XOR; break;\n        case '&': idx++; res =  Op::AND; break;\n        case '|': idx++; res =  Op::OR; break;\n        case '%': idx++; res =  Op::MOD; break;\n        default: break;\n    }\n    if( res == Op::NONE ){\n        if( str.substr(idx,2) == \"<<\" ){\n            idx += 2; res =  Op::SHL;\n        }else if( str.substr(idx,2) == \">>\" ){\n            idx += 2; res =  Op::SHR;\n        }else{\n            return 
false;\n        }\n    }\n    \n    args.push_back((int)res);\n    return true;\n}\n\nbool _parse_il_unop(vector<cst_t>& args, string& str, int& idx){\n    Op res;\n    _skip_whitespace(str, idx);\n    if( idx == str.size())\n        return false;\n    switch( str[idx] ){\n        case '-': idx++; res = Op::NEG; break;\n        case '~': idx++; res = Op::NOT; break;\n        default:\n            return false;\n    }\n    args.push_back((int)res);\n    return true;\n}\n\nbool _parse_il_mov_reg(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    if  (_parse_il_reg(arch, args, str, idx) && \n        _parse_il_affect(str, idx) && \n        _parse_il_reg(arch, args, str, idx) &&\n        _parse_end(str, idx))\n    {\n        instr->args = args;\n        instr->type = ILInstructionType::MOV_REG;\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_mov_cst(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    if  (_parse_il_reg(arch, args, str, idx) && \n        _parse_il_affect(str, idx) && \n        _parse_il_cst(arch, args, str, idx) &&\n        _parse_end(str, idx))\n    {\n        instr->args = args;\n        instr->type = ILInstructionType::MOV_CST;\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_amov_cst(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    if  (_parse_il_reg(arch, args, str, idx) && \n        _parse_il_affect(str, idx) && \n        _parse_il_reg(arch, args, str, idx) &&\n        _parse_il_binop(args, str, idx) &&\n        _parse_il_cst(arch, args, str, idx) &&\n        _parse_end(str, idx))\n    {\n        instr->args = args;\n        instr->type = ILInstructionType::AMOV_CST;\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_amov_reg(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    if  (_parse_il_reg(arch, args, str, 
idx) && \n        _parse_il_affect(str, idx) && \n        _parse_il_reg(arch, args, str, idx) &&\n        _parse_il_binop(args, str, idx) &&\n        _parse_il_reg(arch, args, str, idx) &&\n        _parse_end(str, idx))\n    {\n        instr->args = args;\n        instr->type = ILInstructionType::AMOV_REG;\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_load(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    if  (_parse_il_reg(arch, args, str, idx) && \n        _parse_il_affect(str, idx) &&\n        _parse_il_mem_start(str, idx) &&\n        _parse_il_reg_and_offset(arch, args, str, idx) && \n        _parse_il_mem_end(str, idx) &&\n        _parse_end(str, idx))\n    {\n        instr->args = args;\n        instr->type = ILInstructionType::LOAD;\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_aload(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    if  (_parse_il_reg(arch, args, str, idx) && \n        _parse_il_binop(args, str, idx) &&\n        _parse_il_affect(str, idx) &&\n        _parse_il_mem_start(str, idx) &&\n        _parse_il_reg_and_offset(arch, args, str, idx) && \n        _parse_il_mem_end(str, idx) &&\n        _parse_end(str, idx))\n    {\n        instr->args = args;\n        instr->type = ILInstructionType::ALOAD;\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_load_cst(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    if  (_parse_il_reg(arch, args, str, idx) && \n        _parse_il_affect(str, idx) &&\n        _parse_il_mem_start(str, idx) &&\n        _parse_il_cst(arch, args, str, idx) && \n        _parse_il_mem_end(str, idx) &&\n        _parse_end(str, idx))\n    {\n        instr->args = args;\n        instr->type = ILInstructionType::LOAD_CST;\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_aload_cst(Arch& arch, ILInstruction* instr, 
string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    if  (_parse_il_reg(arch, args, str, idx) && \n        _parse_il_binop(args, str, idx) &&\n        _parse_il_affect(str, idx) &&\n        _parse_il_mem_start(str, idx) &&\n        _parse_il_cst(arch, args, str, idx) && \n        _parse_il_mem_end(str, idx) &&\n        _parse_end(str, idx))\n    {\n        instr->args = args;\n        instr->type = ILInstructionType::ALOAD_CST;\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_store(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    if  (_parse_il_mem_start(str, idx) &&\n         _parse_il_reg_and_offset(arch, args, str, idx) &&\n        _parse_il_mem_end(str, idx) &&\n        _parse_il_affect(str, idx) &&\n        _parse_il_reg(arch, args, str, idx) &&\n        _parse_end(str, idx))\n    {\n        instr->args = args;\n        instr->type = ILInstructionType::STORE;\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_astore(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    if  (_parse_il_mem_start(str, idx) &&\n         _parse_il_reg_and_offset(arch, args, str, idx) &&\n        _parse_il_mem_end(str, idx) &&\n        _parse_il_binop(args, str, idx) &&\n        _parse_il_affect(str, idx) &&\n        _parse_il_reg(arch, args, str, idx) &&\n        _parse_end(str, idx))\n    {\n        instr->args = args;\n        instr->type = ILInstructionType::ASTORE;\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_cst_store(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    if  (_parse_il_mem_start(str, idx) &&\n         _parse_il_cst(arch, args, str, idx) &&\n        _parse_il_mem_end(str, idx) &&\n        _parse_il_affect(str, idx) &&\n        _parse_il_reg(arch, args, str, idx) &&\n        _parse_end(str, idx))\n    {\n        instr->args = args;\n        instr->type = 
ILInstructionType::CST_STORE;\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_cst_astore(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    if  (_parse_il_mem_start(str, idx) &&\n         _parse_il_cst(arch, args, str, idx) &&\n        _parse_il_mem_end(str, idx) &&\n        _parse_il_binop(args, str, idx) &&\n        _parse_il_affect(str, idx) &&\n        _parse_il_reg(arch, args, str, idx) &&\n        _parse_end(str, idx))\n    {\n        instr->args = args;\n        instr->type = ILInstructionType::CST_ASTORE;\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_store_cst(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    if  (_parse_il_mem_start(str, idx) &&\n         _parse_il_reg_and_offset(arch, args, str, idx) &&\n        _parse_il_mem_end(str, idx) &&\n        _parse_il_affect(str, idx) &&\n        _parse_il_cst(arch, args, str, idx) &&\n        _parse_end(str, idx))\n    {\n        instr->args = args;\n        instr->type = ILInstructionType::STORE_CST;\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_astore_cst(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    if  (_parse_il_mem_start(str, idx) &&\n         _parse_il_reg_and_offset(arch, args, str, idx) &&\n        _parse_il_mem_end(str, idx) &&\n        _parse_il_binop(args, str, idx) &&\n        _parse_il_affect(str, idx) &&\n        _parse_il_cst(arch, args, str, idx) &&\n        _parse_end(str, idx))\n    {\n        instr->args = args;\n        instr->type = ILInstructionType::ASTORE_CST;\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_cst_store_cst(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    if  (_parse_il_mem_start(str, idx) &&\n         _parse_il_cst(arch, args, str, idx) &&\n        _parse_il_mem_end(str, idx) &&\n        
_parse_il_affect(str, idx) &&\n        _parse_il_cst(arch, args, str, idx) &&\n        _parse_end(str, idx))\n    {\n        instr->args = args;\n        instr->type = ILInstructionType::CST_STORE_CST;\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_cst_astore_cst(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    if  (_parse_il_mem_start(str, idx) &&\n         _parse_il_cst(arch, args, str, idx) &&\n        _parse_il_mem_end(str, idx) &&\n        _parse_il_binop(args, str, idx) &&\n        _parse_il_affect(str, idx) &&\n        _parse_il_cst(arch, args, str, idx) &&\n        _parse_end(str, idx))\n    {\n        instr->args = args;\n        instr->type = ILInstructionType::CST_ASTORE_CST;\n        return true;\n    }\n    return false;\n}\n\n\nbool _parse_il_cst_store_string(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    string s;\n    if  (_parse_il_mem_start(str, idx) &&\n         _parse_il_cst(arch, args, str, idx) &&\n        _parse_il_mem_end(str, idx) &&\n        _parse_il_affect(str, idx) &&\n        _parse_il_string(s, str, idx) &&\n        _parse_end(str, idx))\n    {\n        instr->args = args;\n        instr->str = s;\n        instr->type = ILInstructionType::CST_STORE_STRING;\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_function(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    vector<int> args_type;\n    if  (_parse_il_cst(arch, args, str, idx) &&\n         _parse_il_function_args(arch, args, args_type, str, idx) &&\n        _parse_end(str, idx))\n    {\n        \n        instr->args = args;\n        instr->args_type = args_type;\n        instr->args_type.insert(instr->args_type.begin(), -1); // Because first arg is the function address :/\n        instr->type = ILInstructionType::FUNCTION;\n        return true;\n    }\n    return false;\n}\n\nbool 
_parse_il_syscall_by_name(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    vector<int> args_type;\n    string name;\n    if  (_parse_il_syscall_name(name, str, idx) &&\n         _parse_il_function_args(arch, args, args_type, str, idx) &&\n        _parse_end(str, idx))\n    {\n        \n        instr->syscall_name = name;\n        instr->args = args;\n        instr->args_type = args_type;\n        instr->type = ILInstructionType::SYSCALL;\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_syscall_by_num(Arch& arch, ILInstruction* instr, string& str){\n    int idx = 0;\n    vector<cst_t> args;\n    vector<int> args_type;\n    int num;\n    if  (_parse_il_syscall_num(arch, num, str, idx) &&\n         _parse_il_function_args(arch, args, args_type, str, idx) &&\n        _parse_end(str, idx))\n    {\n        \n        instr->syscall_num = num;\n        instr->args = args;\n        instr->args_type = args_type;\n        instr->type = ILInstructionType::SYSCALL;\n        return true;\n    }\n    return false;\n}\n\nbool _parse_il_syscall(Arch& arch, ILInstruction* instr, string& str){\n    // NUM before NAME otherwise the num is just parsed as a name string :) \n    return  _parse_il_syscall_by_num(arch, instr, str) ||  \n            _parse_il_syscall_by_name(arch, instr, str);\n}\n\nbool _parse_il_instruction(Arch& arch, ILInstruction* instr, string& str){\n    return  _parse_il_mov_cst(arch, instr, str) || \n            _parse_il_mov_reg(arch, instr, str) ||\n            _parse_il_amov_cst(arch, instr, str) ||\n            _parse_il_amov_reg(arch, instr, str) ||\n            _parse_il_load(arch, instr, str) ||\n            _parse_il_load_cst(arch, instr, str) ||\n            _parse_il_aload(arch, instr, str) ||\n            _parse_il_aload_cst(arch, instr, str) ||\n            _parse_il_store(arch, instr, str) ||\n            _parse_il_cst_store(arch, instr, str) ||\n            _parse_il_astore(arch, 
instr, str) ||\n            _parse_il_cst_astore(arch, instr, str) ||\n            _parse_il_store_cst(arch, instr, str) ||\n            _parse_il_cst_store_cst(arch, instr, str) ||\n            _parse_il_astore_cst(arch, instr, str) ||\n            _parse_il_cst_astore_cst(arch, instr, str) ||\n            _parse_il_cst_store_string(arch, instr, str) ||\n            _parse_il_function(arch, instr, str) ||\n            _parse_il_syscall(arch, instr, str) ||\n            _parse_il_single_syscall(arch, instr, str);\n}\n\n\nILInstruction::ILInstruction(Arch& arch, string str){\n    if( !_parse_il_instruction(arch, this, str)){\n        throw il_exception(\"Invalid instruction string\");\n    }\n}\n\nILInstruction::ILInstruction(ILInstructionType t, vector<cst_t>* a, vector<int>* at , string sname, int snum, string s){\n    type = t;\n    syscall_name = sname;\n    syscall_num = snum;\n    str = s;\n    if( a )\n        args = *a;\n    if( at )\n        args_type = *at;\n}\n"
  },
  {
    "path": "libropium/compiler/strategy_graph.cpp",
    "content": "#include \"strategy.hpp\"\n#include \"expression.hpp\"\n#include \"exception.hpp\"\n#include <algorithm>\n\n/* ===============  Node Assertions ============== */\n\nvoid NodeValidPointers::add_valid_pointer(param_t p){\n    _params.push_back(p);\n}\n\nvoid NodeValidPointers::to_assertion(Node& node, Assertion* assertion){\n    for( auto p : _params ){\n        assertion->valid_pointers.add_valid_pointer(node.params[p].value);\n    }\n}\n\nvoid NodeValidPointers::clear(){\n    _params.clear();\n}\n\n\nvoid NodeAssertion::clear(){\n    valid_pointers.clear();\n}\n\nvoid NodeAssertion::to_assertion(Node& node, Assertion * a){\n    valid_pointers.to_assertion(node, a);\n}\n\n/* ===============  Nodes ============== */\n\nbool constraint_branch_type(Node* node, StrategyGraph* graph, Arch* arch){\n    return (node->affected_gadget->branch_type == node->branch_type) ||\n           (node->branch_type == BranchType::ANY);\n}\n\nNode::Node(int i, GadgetType t):id(i), type(t), branch_type(BranchType::ANY), is_indirect(false), is_disabled(false),\n                                affected_gadget(nullptr){\n    mandatory_following_node = -1;\n    // Add constraints that must always be verified\n    assigned_gadget_constraints.push_back(constraint_branch_type); // Matching branch type\n};\n\nbool Node::has_mandatory_following_node(){\n    return mandatory_following_node != -1;\n}\n\nint Node::nb_params(){\n    switch( type ){\n        case GadgetType::MOV_REG: return NB_PARAM_MOVREG;\n        case GadgetType::MOV_CST: return NB_PARAM_MOVCST;\n        case GadgetType::AMOV_CST: return NB_PARAM_AMOVCST;\n        case GadgetType::AMOV_REG: return NB_PARAM_AMOVREG;\n        case GadgetType::LOAD: return NB_PARAM_LOAD;\n        case GadgetType::ALOAD: return NB_PARAM_ALOAD;\n        case GadgetType::STORE: return NB_PARAM_STORE;\n        case GadgetType::ASTORE: return NB_PARAM_ASTORE;\n        case GadgetType::SYSCALL: return NB_PARAM_SYSCALL;\n        case 
GadgetType::INT80: return NB_PARAM_INT80;\n        \n        default: throw runtime_exception(\"Unsupported gadget type in Node::nb_params()\");\n    }\n}\n\nbool Node::has_free_param(){\n    for( int p = 0; p < nb_params(); p++){\n        if( params[p].is_free() )\n            return true;\n    }\n    return false;\n}\n\nbool Node::is_final_param(param_t param){\n    return strategy_edges.out.empty() &&\n           ( (has_dst_reg_param() && (param == get_param_num_dst_reg())) || \n              (has_dst_addr_reg_param() && param == get_param_num_dst_addr_reg()));\n}\n\nbool Node::is_initial_param(param_t param){\n    return is_src_param(param);\n}\n\nbool Node::has_dst_reg_param(){\n    return  type == GadgetType::MOV_CST || \n            type == GadgetType::MOV_REG ||\n            type == GadgetType::AMOV_CST ||\n            type == GadgetType::AMOV_REG ||   \n            type == GadgetType::LOAD ||\n            type == GadgetType::ALOAD; \n}\n\nbool Node::has_dst_addr_reg_param(){\n    return  type == GadgetType::STORE || \n            type == GadgetType::ASTORE;\n}\n\nbool Node::is_src_param(param_t param){\n        switch( type ){\n        case GadgetType::MOV_REG:\n            return param == PARAM_MOVREG_SRC_REG;\n        case GadgetType::MOV_CST:\n            return false;\n        case GadgetType::AMOV_CST:\n            return param == PARAM_AMOVCST_SRC_REG;\n        case GadgetType::AMOV_REG:\n            return param == PARAM_AMOVREG_SRC_REG1 ||\n                   param == PARAM_AMOVREG_SRC_REG2;\n        case GadgetType::LOAD:\n            return param == PARAM_LOAD_SRC_ADDR_REG;\n        case GadgetType::ALOAD:\n            return param == PARAM_ALOAD_SRC_ADDR_REG;\n        case GadgetType::STORE:\n            return param == PARAM_STORE_SRC_REG;\n        case GadgetType::ASTORE:\n            return param == PARAM_ASTORE_SRC_REG;\n        case GadgetType::SYSCALL:\n        case GadgetType::INT80:\n            return false;\n        default:\n          
  throw runtime_exception(QuickFmt() << \"Node::is_src_param(): got unsupported node type \" << (int)type >> QuickFmt::to_str);\n    }\n}\n\nbool Node::is_generic_param(param_t param){\n    return param == get_param_num_gadget_addr() || \n           param == get_param_num_gadget_jmp_reg() || \n           param == get_param_num_gadget_sp_delta() || \n           param == get_param_num_gadget_sp_inc();\n}\n\n\nvoid Node::add_incoming_strategy_edge(node_t src_node){\n    if( std::find(strategy_edges.in.begin(), strategy_edges.in.end(), src_node) == strategy_edges.in.end()){\n        strategy_edges.in.push_back(src_node);\n    }\n}\n\nvoid Node::add_incoming_param_edge(node_t src_node){\n    if( std::find(param_edges.in.begin(), param_edges.in.end(), src_node) == param_edges.in.end()){\n        param_edges.in.push_back(src_node);\n    }\n}\nvoid Node::add_outgoing_strategy_edge(node_t dst_node){\n    if( std::find(strategy_edges.out.begin(), strategy_edges.out.end(), dst_node) == strategy_edges.out.end()){\n        strategy_edges.out.push_back(dst_node);\n    }\n}\n\nvoid Node::add_outgoing_param_edge(node_t dst_node){\n    if( std::find(param_edges.out.begin(), param_edges.out.end(), dst_node) == param_edges.out.end()){\n        param_edges.out.push_back(dst_node);\n    }\n}\n\nvoid Node::remove_incoming_strategy_edge(node_t src_node){\n    strategy_edges.in.erase(std::remove(strategy_edges.in.begin(), strategy_edges.in.end(), src_node), strategy_edges.in.end());\n}\n\nvoid Node::remove_incoming_param_edge(node_t src_node){\n     param_edges.in.erase(std::remove(param_edges.in.begin(), param_edges.in.end(), src_node), param_edges.in.end());\n}\n\nvoid Node::remove_outgoing_strategy_edge(node_t dst_node){\n     strategy_edges.out.erase(std::remove(strategy_edges.out.begin(), strategy_edges.out.end(), dst_node), strategy_edges.out.end());\n}\n\nvoid Node::remove_outgoing_param_edge(node_t dst_node){\n     param_edges.out.erase(std::remove(param_edges.out.begin(), 
param_edges.out.end(), dst_node), param_edges.out.end());\n}\n     \nint Node::get_param_num_data_link(){\n    switch( type ){\n        case GadgetType::MOV_REG: return PARAM_MOVREG_DATA_LINK;\n        case GadgetType::AMOV_REG: return PARAM_AMOVREG_DATA_LINK;\n        case GadgetType::MOV_CST: return PARAM_MOVCST_DATA_LINK;\n        case GadgetType::AMOV_CST: return PARAM_AMOVCST_DATA_LINK;\n        case GadgetType::LOAD: return PARAM_LOAD_DATA_LINK;\n        case GadgetType::ALOAD: return PARAM_ALOAD_DATA_LINK;\n        case GadgetType::STORE: return PARAM_STORE_DATA_LINK;\n        case GadgetType::ASTORE: return PARAM_ASTORE_DATA_LINK;\n        case GadgetType::SYSCALL: return PARAM_SYSCALL_DATA_LINK;\n        case GadgetType::INT80: return PARAM_INT80_DATA_LINK;\n        default:\n            throw runtime_exception(\"Node::get_param_num_data_link(): got unsupported gadget type\");\n    }\n}\n            \nint Node::get_param_num_gadget_sp_inc(){\n    switch( type ){\n        case GadgetType::MOV_REG: return PARAM_MOVREG_GADGET_SP_INC;\n        case GadgetType::AMOV_REG: return PARAM_AMOVREG_GADGET_SP_INC;\n        case GadgetType::MOV_CST: return PARAM_MOVCST_GADGET_SP_INC;\n        case GadgetType::AMOV_CST: return PARAM_AMOVCST_GADGET_SP_INC;\n        case GadgetType::LOAD: return PARAM_LOAD_GADGET_SP_INC;\n        case GadgetType::ALOAD: return PARAM_ALOAD_GADGET_SP_INC;\n        case GadgetType::STORE: return PARAM_STORE_GADGET_SP_INC;\n        case GadgetType::ASTORE: return PARAM_ASTORE_GADGET_SP_INC;\n        case GadgetType::SYSCALL: return PARAM_SYSCALL_GADGET_SP_INC;\n        case GadgetType::INT80: return PARAM_INT80_GADGET_SP_INC;\n        default:\n            throw runtime_exception(\"Node::get_param_num_gadget_sp_inc(): got unsupported gadget type\");\n    }\n}\n\nint Node::get_param_num_gadget_sp_delta(){\n    switch( type ){\n        case GadgetType::MOV_REG: return PARAM_MOVREG_GADGET_SP_DELTA;\n        case GadgetType::AMOV_REG: return 
PARAM_AMOVREG_GADGET_SP_DELTA;\n        case GadgetType::MOV_CST: return PARAM_MOVCST_GADGET_SP_DELTA;\n        case GadgetType::AMOV_CST: return PARAM_AMOVCST_GADGET_SP_DELTA;\n        case GadgetType::LOAD: return PARAM_LOAD_GADGET_SP_DELTA;\n        case GadgetType::ALOAD: return PARAM_ALOAD_GADGET_SP_DELTA;\n        case GadgetType::STORE: return PARAM_STORE_GADGET_SP_DELTA;\n        case GadgetType::ASTORE: return PARAM_ASTORE_GADGET_SP_DELTA;\n        case GadgetType::SYSCALL: return PARAM_SYSCALL_GADGET_SP_DELTA;\n        case GadgetType::INT80: return PARAM_INT80_GADGET_SP_DELTA;\n        default:\n            throw runtime_exception(\"Node::get_param_num_gadget_sp_inc(): got unsupported gadget type\");\n    }\n}\n\nint Node::get_param_num_gadget_addr(){\n    switch( type ){\n        case GadgetType::MOV_REG: return PARAM_MOVREG_GADGET_ADDR;\n        case GadgetType::AMOV_REG: return PARAM_AMOVREG_GADGET_ADDR;\n        case GadgetType::MOV_CST: return PARAM_MOVCST_GADGET_ADDR;\n        case GadgetType::AMOV_CST: return PARAM_AMOVCST_GADGET_ADDR;\n        case GadgetType::LOAD: return PARAM_LOAD_GADGET_ADDR;\n        case GadgetType::ALOAD: return PARAM_ALOAD_GADGET_ADDR;\n        case GadgetType::STORE: return PARAM_STORE_GADGET_ADDR;\n        case GadgetType::ASTORE: return PARAM_ASTORE_GADGET_ADDR;\n        case GadgetType::SYSCALL: return PARAM_SYSCALL_GADGET_ADDR;\n        case GadgetType::INT80: return PARAM_INT80_GADGET_ADDR;\n        default:\n            throw runtime_exception(\"Node::get_param_num_gadget_addr(): got unsupported gadget type\");\n    }\n}\n\nint Node::get_param_num_gadget_jmp_reg(){\n    switch( type ){\n        case GadgetType::MOV_REG: return PARAM_MOVREG_GADGET_JMP_REG;\n        case GadgetType::AMOV_REG: return PARAM_AMOVREG_GADGET_JMP_REG;\n        case GadgetType::MOV_CST: return PARAM_MOVCST_GADGET_JMP_REG;\n        case GadgetType::AMOV_CST: return PARAM_AMOVCST_GADGET_JMP_REG;\n        case GadgetType::LOAD: return 
PARAM_LOAD_GADGET_JMP_REG;\n        case GadgetType::ALOAD: return PARAM_ALOAD_GADGET_JMP_REG;\n        case GadgetType::STORE: return PARAM_STORE_GADGET_JMP_REG;\n        case GadgetType::ASTORE: return PARAM_ASTORE_GADGET_JMP_REG;\n        case GadgetType::SYSCALL: return PARAM_SYSCALL_GADGET_JMP_REG;\n        case GadgetType::INT80: return PARAM_INT80_GADGET_JMP_REG;\n        default:\n            throw runtime_exception(\"Node::get_param_num_gadget_jmp_reg(): got unsupported gadget type\");\n    }\n}\n\nint Node::get_param_num_dst_reg(){\n    switch( type ){\n        case GadgetType::MOV_REG: return PARAM_MOVREG_DST_REG;\n        case GadgetType::AMOV_REG: return PARAM_AMOVREG_DST_REG;\n        case GadgetType::MOV_CST: return PARAM_MOVCST_DST_REG;\n        case GadgetType::AMOV_CST: return PARAM_AMOVCST_DST_REG;\n        case GadgetType::LOAD: return PARAM_LOAD_DST_REG;\n        case GadgetType::ALOAD: return PARAM_ALOAD_DST_REG;\n        default:\n            throw runtime_exception(\"Node::get_param_num_dst_reg(): got unsupported gadget type\");\n    }\n}\n\nint Node::get_param_num_src_reg(){\n    switch( type ){\n        case GadgetType::MOV_REG: return PARAM_MOVREG_SRC_REG;\n        case GadgetType::MOV_CST: return PARAM_MOVCST_DST_REG;\n        case GadgetType::AMOV_CST: return PARAM_AMOVCST_SRC_REG;\n        case GadgetType::STORE: return PARAM_STORE_SRC_REG;\n        case GadgetType::ASTORE: return PARAM_ASTORE_SRC_REG;\n        default:\n            throw runtime_exception(\"Node::get_param_num_src_reg(): got unsupported gadget type\");\n    }\n}\n\nint Node::get_param_num_src_addr_offset(){\n    switch( type ){\n        case GadgetType::LOAD: return PARAM_LOAD_SRC_ADDR_OFFSET;\n        case GadgetType::ALOAD: return PARAM_ALOAD_SRC_ADDR_OFFSET;\n        default:\n            throw runtime_exception(\"Node::get_param_num_src_addr_offset(): got unsupported gadget type\");\n    }\n}\n\nint Node::get_param_num_src_addr_reg(){\n    switch( type ){\n        
case GadgetType::LOAD: return PARAM_LOAD_SRC_ADDR_REG;\n        case GadgetType::ALOAD: return PARAM_ALOAD_SRC_ADDR_REG;\n        default:\n            throw runtime_exception(\"Node::get_param_num_src_addr_offset(): got unsupported gadget type\");\n    }\n}\n\nint Node::get_param_num_dst_addr_offset(){\n    switch( type ){\n        case GadgetType::STORE: return PARAM_STORE_DST_ADDR_OFFSET;\n        case GadgetType::ASTORE: return PARAM_ASTORE_DST_ADDR_OFFSET;\n        default:\n            throw runtime_exception(\"Node::get_param_num_dst_addr_offset(): got unsupported gadget type\");\n    }\n}\n\nint Node::get_param_num_dst_addr_reg(){\n    switch( type ){\n        case GadgetType::STORE: return PARAM_STORE_DST_ADDR_REG;\n        case GadgetType::ASTORE: return PARAM_ASTORE_DST_ADDR_REG;\n        default:\n            throw runtime_exception(\"Node::get_param_num_dst_addr_offset(): got unsupported gadget type\");\n    }\n}\n\nbool Node::modifies_reg(int reg_num){\n        return (affected_gadget->modified_regs[reg_num]);\n}\n\naddr_t _get_valid_gadget_address(Gadget* gadget, Arch* arch, Constraint* constraint){\n    for( addr_t addr : gadget->addresses){\n        if( !constraint || constraint->bad_bytes.is_valid_address(addr, arch->octets))\n            return addr;\n    }\n    throw strategy_exception(\"Fatal error: couldn't get valid gadget address. This should not happen ! 
\");\n}\n\nbool Node::assign_gadget(Gadget* gadget, Arch* arch, Constraint* constraint){\n    addr_t addr;\n    try{\n        addr = _get_valid_gadget_address(gadget, arch, constraint);\n    }catch(strategy_exception& e){\n        return false;\n    }\n\n    affected_gadget = gadget;\n    // Set gadget parameters depending on type (but dont change the name!)\n    params[get_param_num_gadget_addr()].value = addr;\n    params[get_param_num_gadget_sp_inc()].value = gadget->sp_inc;\n    params[get_param_num_gadget_jmp_reg()].value = gadget->jmp_reg;\n    params[get_param_num_gadget_sp_delta()].value = gadget->max_sp_inc - gadget->sp_inc;\n    return true;\n}\n\nvoid Node::apply_assertion(){\n    assertion.clear();\n    node_assertion.to_assertion(*this, &assertion);\n}\n\n/* ===============  Strategy Graphs ============== */\n/* =============================================== */\n\nStrategyGraph::StrategyGraph(): has_gadget_selection(false), _depth(-1){};\n\nvoid StrategyGraph::update_size(){\n    size = 0;\n    for( Node& node: nodes )\n        if( ! 
node.is_disabled )\n            size++;\n}\n\n/* =========== Basic manips on edges/nodes =========== */\nnode_t StrategyGraph::new_node(GadgetType t){\n    nodes.push_back(Node(nodes.size(), t));\n    // Give names to generic parameters\n    nodes.back().params[nodes.back().get_param_num_gadget_addr()].make_cst( -1, new_name(\"gadget_addr\"));\n    nodes.back().params[nodes.back().get_param_num_gadget_sp_inc()].make_cst( -1, new_name(\"gadget_sp_inc\"));\n    nodes.back().params[nodes.back().get_param_num_gadget_jmp_reg()].make_cst( -1, new_name(\"gadget_jmp_reg\"));\n    nodes.back().params[nodes.back().get_param_num_gadget_sp_delta()].make_cst( -1, new_name(\"gadget_sp_delta\"));\n    return nodes.size()-1;\n}\n\nvoid StrategyGraph::disable_node(node_t node){\n    nodes[node].is_disabled = true;\n    nodes[node].special_paddings.clear();\n}\n\nstring StrategyGraph::new_name(string base){\n    return name_generator.new_name(base);\n}\n\n// Make the dep that point to the parameter 'curr_param_type' on 'curr_node' point to 'new_node'\n// and 'new_param_type' instead.\nbool _redirect_param_dep(ParamDep& dep, node_t curr_node, param_t curr_param_type, node_t new_node, param_t new_param_type){\n    if( dep.node == curr_node && dep.param_type == curr_param_type ){\n        dep.node = new_node;\n        dep.param_type = new_param_type;\n        return true;\n    }else\n        return false;\n}\n\nvoid _redirect_param_deps(Param& param, Node& curr_node, param_t curr_param_type, Node& new_node, param_t new_param_type){\n    for( ParamDep& dep : param.deps ){\n        if( dep.node == curr_node.id && dep.param_type == curr_param_type ){\n            // Change var name in expression if constant\n            if( param.type == ParamType::CST && param.expr != nullptr){\n                Expr e = param.expr->copy();\n                e->replace_var_name(curr_node.params[curr_param_type].name, new_node.params[new_param_type].name); \n                param.expr = e;\n            }\n  
          // Redirect\n            dep.node = new_node.id;\n            dep.param_type = new_param_type;\n        }\n    }\n}\n\n// Make the edges that point to the parameter 'curr_param_type' on 'curr_node' point to 'new_node'\n// and 'new_param_type'.\nvoid StrategyGraph::redirect_param_edges(node_t curr_node, param_t curr_param_type, node_t new_node, param_t new_param_type){\n    for( node_t p = 0; p < nodes.size(); p++ ){\n        Node& prev = nodes[p];\n        // Redirect parameters\n        for( int p = 0; p < prev.nb_params(); p++ ){\n            _redirect_param_deps(prev.params[p], nodes[curr_node], curr_param_type, nodes[new_node], new_param_type);\n        }\n        // Redirect special paddings\n        for( ROPPadding& padd : prev.special_paddings ){\n            _redirect_param_deps(padd.offset, nodes[curr_node], curr_param_type, nodes[new_node], new_param_type);\n            _redirect_param_deps(padd.value, nodes[curr_node], curr_param_type, nodes[new_node], new_param_type);\n        }\n    }\n}\n\nvoid StrategyGraph::redirect_incoming_strategy_edges(node_t curr_node, node_t new_node){\n    Node& curr = nodes[curr_node];\n    Node& newn = nodes[new_node];\n    \n    for( node_t p : curr.strategy_edges.in ){\n        Node& prev = nodes[p];\n        if( std::count(prev.strategy_edges.out.begin(), prev.strategy_edges.out.end(), curr_node) > 0 ){\n            // Erase previous outgoing edges to curr\n            prev.strategy_edges.out.erase(std::remove(prev.strategy_edges.out.begin(), prev.strategy_edges.out.end(), curr_node), prev.strategy_edges.out.end());\n            if( new_node != prev.id ){ // If new node depends on curr_node, don't redirect it to itself\n                // Add new outgoing to new_node\n                prev.add_outgoing_strategy_edge(new_node);\n                // Add new incoming in new_node\n                newn.add_incoming_strategy_edge(prev.id);\n            }\n        }\n    }\n}\n\nvoid 
StrategyGraph::redirect_outgoing_strategy_edges(node_t curr_node, node_t new_node){\n    Node& curr = nodes[curr_node];\n    Node& newn = nodes[new_node];\n    \n    for( node_t n : curr.strategy_edges.out ){\n        Node& next = nodes[n];\n        if( std::count(next.strategy_edges.in.begin(), next.strategy_edges.in.end(), curr_node) > 0 ){\n            // Erase next incoming edges from curr\n            next.strategy_edges.in.erase(std::remove(next.strategy_edges.in.begin(), next.strategy_edges.in.end(), curr_node), next.strategy_edges.in.end());\n            if( new_node != next.id ){ // If new node depends on curr_node, don't redirect it to itself\n                // Add new incoming from new_node\n                next.add_incoming_strategy_edge(new_node);\n                // Add new outgoing in new_node\n                newn.add_outgoing_strategy_edge(next.id);\n            }\n        }\n    }\n}\n\n\nvoid StrategyGraph::redirect_generic_param_edges(node_t curr_node, node_t new_node){\n    Node& curr = nodes[curr_node];\n    Node& newn = nodes[new_node];\n    redirect_param_edges(curr_node, curr.get_param_num_gadget_addr(), new_node, newn.get_param_num_gadget_addr());\n    redirect_param_edges(curr_node, curr.get_param_num_gadget_jmp_reg(), new_node, newn.get_param_num_gadget_jmp_reg());\n    redirect_param_edges(curr_node, curr.get_param_num_gadget_sp_inc(), new_node, newn.get_param_num_gadget_sp_inc());\n    redirect_param_edges(curr_node, curr.get_param_num_gadget_sp_delta(), new_node, newn.get_param_num_gadget_sp_delta());\n\n}\n\nvoid StrategyGraph::add_strategy_edge(node_t from, node_t to){\n    nodes[from].add_outgoing_strategy_edge(to);\n    nodes[to].add_incoming_strategy_edge(from);\n}\n\nvoid StrategyGraph::add_param_edge(node_t from, node_t to){\n    nodes[from].add_outgoing_param_edge(to);\n    nodes[to].add_incoming_param_edge(from);\n}\n\nvoid StrategyGraph::add_interference_edge(node_t from, node_t to){\n    
nodes[from].interference_edges.out.push_back(to);\n    // We don't add an incoming edge in 'to' for interference edges\n    // because they are not used\n}\n\n// Update all parameter edges according to params and paddings\nvoid StrategyGraph::update_param_edges(){\n    // First, clear all param edges\n    for( Node& node : nodes ){\n        node.param_edges.in.clear();\n        node.param_edges.out.clear();\n    }\n\n    // Check every couple of nodes\n    for( int n1 = 0; n1 < nodes.size(); n1++ ){\n        Node& node1 = nodes[n1];\n        // Check params\n        for( int p = 0; p < node1.nb_params(); p++){\n            Param& param = node1.params[p];\n            for( ParamDep& dep : param.deps ){\n                add_param_edge(n1, dep.node);\n            }\n        }\n        // Check paddings\n        for( ROPPadding& padd : node1.special_paddings){\n            for( ParamDep& dep : padd.offset.deps ){\n                if( dep.node != n1 )\n                    add_param_edge(n1, dep.node);\n            }\n            for( ParamDep& dep : padd.value.deps ){\n                if( dep.node != n1 )\n                    add_param_edge(n1, dep.node);\n            }\n        }\n    }\n}\n\nvoid StrategyGraph::clear_interference_edges(node_t n){\n    nodes[n].interference_edges.out.clear();\n}\n\n\nbool StrategyGraph::modifies_reg(node_t n, int reg_num, bool check_following_node){\n    bool res = nodes[n].modifies_reg(reg_num);\n    if( check_following_node && nodes[n].mandatory_following_node != -1)\n        return res || modifies_reg(nodes[n].mandatory_following_node, reg_num, true);\n    else\n        return res;\n}\n\nbool StrategyGraph::has_dependent_param(node_t n, param_t param){\n    for( node_t prev : nodes[n].param_edges.in ){\n        for( int p = 0; p < nodes[prev].nb_params(); p++ ){\n            for( ParamDep& dep : nodes[prev].params[p].deps){\n                if( dep.node == n && dep.param_type == param)\n                    return true;\n            
}\n        }\n    }\n    return false;\n}\n\n\n/* ===============  Ordering ============== */\nvoid StrategyGraph::_dfs_strategy_explore(vector<node_t>& marked, node_t n){\n    if( nodes[n].is_disabled || nodes[n].is_indirect || std::count(dfs_strategy.begin(), dfs_strategy.end(), n))\n        return; // Ignore disabled or indirect nodes\n    if( std::count(marked.begin(), marked.end(), n) != 0 ){\n        throw runtime_exception(\"StrategyGraph: strategy DFS: unexpected cycle detected!\");\n    }else{\n        marked.push_back(n);\n    }\n    for( node_t n2 : nodes[n].strategy_edges.out ){\n        _dfs_strategy_explore(marked, n2);\n    }\n    dfs_strategy.push_back(n);\n}\n\nvoid StrategyGraph::compute_dfs_strategy(){\n    vector<node_t> marked;\n    dfs_strategy.clear();\n    for( Node& node : nodes ){\n        if( node.is_disabled || (std::count(marked.begin(), marked.end(), node.id) != 0))\n            continue;\n        else\n            _dfs_strategy_explore(marked, node.id);\n    }\n}\n\nvoid StrategyGraph::_dfs_params_explore(vector<node_t>& marked, node_t n){\n    if( std::count(dfs_params.begin(), dfs_params.end(), n))\n        return; // Ignore already visited nodes\n    // Note: we don't ignore disabled nodes because they can hold constants parameters\n    // from which other nodes depend\n    if( std::count(marked.begin(), marked.end(), n) != 0 ){\n        throw runtime_exception(\"StrategyGraph: params DFS: unexpected cycle detected!\");\n    }else{\n        marked.push_back(n);\n    }\n    for( node_t n2 : nodes[n].param_edges.out ){\n        _dfs_params_explore(marked, n2);\n    }\n    marked.pop_back(); // Unmark the node for the current exploration\n    dfs_params.push_back(n);\n}\n\nvoid StrategyGraph::compute_dfs_params(){\n    vector<node_t> marked;\n    dfs_params.clear();\n    for( Node& node : nodes ){\n        if( node.is_disabled || (std::count(marked.begin(), marked.end(), node.id) != 0))\n            continue;\n        else\n           
 _dfs_params_explore(marked, node.id);\n    }\n}\n\n// Returns false <=> the graph contains a cycle\nbool StrategyGraph::_dfs_scheduling_explore(vector<node_t>& marked, node_t n){\n    if( nodes[n].is_disabled || std::count(dfs_scheduling.begin(), dfs_scheduling.end(), n))\n        return true; // Ignore disabled or indirect nodes or already visited ones\n\n    if( std::count(marked.begin(), marked.end(), n) != 0 ){\n        // Cycle detected !\n        return false;\n    }else{\n        marked.push_back(n);\n    }\n    \n    \n    for( node_t n2 : nodes[n].strategy_edges.out ){\n        if( n2 == nodes[n].mandatory_following_node )\n            continue;\n        if( ! _dfs_scheduling_explore(marked, n2))\n            return false;\n    }\n    for( node_t n2 : nodes[n].interference_edges.out){\n        if( n2 == nodes[n].mandatory_following_node )\n            continue;\n        if( ! _dfs_scheduling_explore(marked, n2))\n            return false;\n    }\n    // Do mandatory node in the end if any\n    if( nodes[n].mandatory_following_node != -1 ){\n        if( ! _dfs_scheduling_explore(marked, nodes[n].mandatory_following_node))\n            return false;\n    }\n    \n    dfs_scheduling.push_back(n);\n    return true;\n}\n\nbool StrategyGraph::compute_dfs_scheduling(){\n    vector<node_t> marked;\n    dfs_scheduling.clear();\n    for( Node& node : nodes ){\n        if( node.is_disabled || node.is_indirect || \n                (std::count(marked.begin(), marked.end(), node.id) != 0)){\n            continue;\n        }else{\n            if( ! _dfs_scheduling_explore(marked, node.id) ){\n                return false; // Cycle detected\n            }\n        }\n    }\n    return true;\n}\n\n/* =============== Gadget Selection ============== */\n// Get the concrete value for parameters depending on other \n// gadgets. 
This functions expects all the parameters in nodes that\n// are used by the 'param' argument to have been resolved already\nvoid StrategyGraph::_resolve_param(Param& param){\n    if( param.is_dependent()){\n        if( param.type == ParamType::REG ){\n            param.value = nodes[param.deps[0].node].params[param.deps[0].param_type].value;\n        }else if( param.type == ParamType::CST){\n            if( param.expr == nullptr ){\n                // If not expr, just take the value of the other param\n                param.value = nodes[param.deps[0].node].params[param.deps[0].param_type].value;\n            }else{\n                param.value = param.expr->concretize(&params_ctx);\n            }\n        }else{\n            throw runtime_exception(\"_resolve_param(): got unsupported param type\");\n        }\n    }\n    // If constant, update the context\n    if( param.type == ParamType::CST){\n        params_ctx.set(param.name, param.value);\n    }\n}\n\nvoid StrategyGraph::_resolve_all_params(node_t n){\n    Node& node = nodes[n];\n    // Resolve normal parameters\n    for( int p = 0; p < node.nb_params(); p++){\n        _resolve_param(node.params[p]);\n    }\n    // Resolve special paddings\n    for( ROPPadding& padd : node.special_paddings ){\n        _resolve_param(padd.offset);\n        _resolve_param(padd.value);   \n    }\n}\n\n\n// Wrapper that queries the database to find the list of gadgets that match\n// a strategy node\nconst vector<Gadget*>& StrategyGraph::_get_matching_gadgets(GadgetDB& db, node_t n){\n    Node& node = nodes[n];\n    reg_t src_reg, src_reg2, dst_reg, dst_addr_reg, src_addr_reg;\n    cst_t src_cst, src_addr_cst, dst_addr_cst;\n    Op src_op, op;\n\n    // resolve parameters for node 'n'\n    _resolve_all_params(n);\n\n    switch( node.type ){\n        // make query\n        case GadgetType::MOV_REG:\n            src_reg = node.params[PARAM_MOVREG_SRC_REG].value;\n            dst_reg = node.params[PARAM_MOVREG_DST_REG].value;\n      
      return db.get_mov_reg(dst_reg, src_reg);\n        case GadgetType::MOV_CST:\n            dst_reg = node.params[PARAM_MOVCST_DST_REG].value;\n            src_cst = node.params[PARAM_MOVCST_SRC_CST].value;\n            return db.get_mov_cst(dst_reg, src_cst);\n        case GadgetType::AMOV_CST:\n            dst_reg = node.params[PARAM_AMOVCST_DST_REG].value;\n            src_reg = node.params[PARAM_AMOVCST_SRC_REG].value;\n            src_op = (Op)node.params[PARAM_AMOVCST_SRC_OP].value;\n            src_cst = node.params[PARAM_AMOVCST_SRC_CST].value;\n            return db.get_amov_cst(dst_reg, src_reg, src_op, src_cst);\n        case GadgetType::AMOV_REG:\n            dst_reg = node.params[PARAM_AMOVREG_DST_REG].value;\n            src_reg = node.params[PARAM_AMOVREG_SRC_REG1].value;\n            src_op = (Op)node.params[PARAM_AMOVREG_SRC_OP].value;\n            src_reg2 = node.params[PARAM_AMOVREG_SRC_REG2].value;\n            return db.get_amov_reg(dst_reg, src_reg, src_op, src_reg2);\n        case GadgetType::LOAD:\n            dst_reg = node.params[PARAM_LOAD_DST_REG].value;\n            src_addr_reg = node.params[PARAM_LOAD_SRC_ADDR_REG].value;\n            src_addr_cst = node.params[PARAM_LOAD_SRC_ADDR_OFFSET].value;\n            return db.get_load(dst_reg, src_addr_reg, src_addr_cst);\n        case GadgetType::ALOAD:\n            dst_reg = node.params[PARAM_ALOAD_DST_REG].value;\n            op = (Op)node.params[PARAM_ALOAD_OP].value;\n            src_addr_reg = node.params[PARAM_ALOAD_SRC_ADDR_REG].value;\n            src_addr_cst = node.params[PARAM_ALOAD_SRC_ADDR_OFFSET].value;\n            return db.get_aload(dst_reg, op, src_addr_reg, src_addr_cst);\n        case GadgetType::STORE:\n            dst_addr_reg = node.params[PARAM_STORE_DST_ADDR_REG].value;\n            dst_addr_cst = node.params[PARAM_STORE_DST_ADDR_OFFSET].value;\n            src_reg = node.params[PARAM_STORE_SRC_REG].value;\n            return db.get_store(dst_addr_reg, 
dst_addr_cst, src_reg);\n        case GadgetType::ASTORE:\n            dst_addr_reg = node.params[PARAM_ASTORE_DST_ADDR_REG].value;\n            dst_addr_cst = node.params[PARAM_ASTORE_DST_ADDR_OFFSET].value;\n            op = (Op)node.params[PARAM_ASTORE_OP].value;\n            src_reg = node.params[PARAM_ASTORE_SRC_REG].value;\n            return db.get_astore(dst_addr_reg, dst_addr_cst, op, src_reg);\n        case GadgetType::SYSCALL:\n            return db.get_syscall();\n        case GadgetType::INT80:\n            return db.get_int80();\n        default:\n            throw runtime_exception(QuickFmt() << \"_get_matching_gadgets(): got unsupported node type \" << (int)node.type >> QuickFmt::to_str);\n    }\n}\n\n// Wrapper to the database to get a list of gadgets that match a strategy node\n// that still has non-resolved (also called 'free') parameters. \n// \n// For example it can find all gadgets that match a node: X = ecx + Y\n// and return :\n//   - mov edx, ecx\n//   - add ecx, esi\n//   ... 
\n\nPossibleGadgets* StrategyGraph::_get_possible_gadgets(GadgetDB& db, node_t n){\n    Node& node = nodes[n];\n    bool params_status[MAX_PARAMS];\n    int p;\n\n    // resolve parameters for node 'n'\n    _resolve_all_params(n);\n\n    // Fill a table with parameters status (free or not)\n    for( p = 0; p < node.nb_params(); p++){\n        params_status[p] = node.params[p].is_free();\n    }\n    // Make the query to the db\n    switch( node.type ){\n        case GadgetType::MOV_REG:\n            return db.get_possible_mov_reg(node.params[PARAM_MOVREG_DST_REG].value,\n                                           node.params[PARAM_MOVREG_SRC_REG].value,\n                                           params_status); \n        case GadgetType::AMOV_REG:\n            return db.get_possible_amov_reg(node.params[PARAM_AMOVREG_DST_REG].value,\n                                           node.params[PARAM_AMOVREG_SRC_REG1].value,\n                                           (Op)node.params[PARAM_AMOVREG_SRC_OP].value,\n                                           node.params[PARAM_AMOVREG_SRC_REG2].value,\n                                           params_status); \n        case GadgetType::MOV_CST:\n            return db.get_possible_mov_cst(node.params[PARAM_MOVCST_DST_REG].value,\n                                           node.params[PARAM_MOVCST_SRC_CST].value,\n                                           params_status);\n        case GadgetType::AMOV_CST:\n            return db.get_possible_amov_cst(node.params[PARAM_AMOVCST_DST_REG].value,\n                                           node.params[PARAM_AMOVCST_SRC_REG].value,\n                                           (Op)node.params[PARAM_AMOVCST_SRC_OP].value,\n                                           node.params[PARAM_AMOVCST_SRC_CST].value,\n                                           params_status); \n        case GadgetType::LOAD:\n            return db.get_possible_load(node.params[PARAM_LOAD_DST_REG].value,\n        
                                node.params[PARAM_LOAD_SRC_ADDR_REG].value,\n                                        node.params[PARAM_LOAD_SRC_ADDR_OFFSET].value,\n                                        params_status);\n        case GadgetType::ALOAD:\n            return db.get_possible_aload(node.params[PARAM_ALOAD_DST_REG].value,\n                                        (Op)node.params[PARAM_ALOAD_OP].value,\n                                        node.params[PARAM_ALOAD_SRC_ADDR_REG].value,\n                                        node.params[PARAM_ALOAD_SRC_ADDR_OFFSET].value,\n                                        params_status);\n        case GadgetType::STORE:\n            return db.get_possible_store(node.params[PARAM_STORE_DST_ADDR_REG].value,\n                                        node.params[PARAM_STORE_DST_ADDR_OFFSET].value,\n                                        node.params[PARAM_STORE_SRC_REG].value,\n                                        params_status);\n        case GadgetType::ASTORE:\n            return db.get_possible_astore(node.params[PARAM_ASTORE_DST_ADDR_REG].value,\n                                        node.params[PARAM_ASTORE_DST_ADDR_OFFSET].value,\n                                        (Op)node.params[PARAM_ASTORE_OP].value,\n                                        node.params[PARAM_ASTORE_SRC_REG].value,\n                                        params_status);\n        default:\n            throw runtime_exception(\"_get_possible_gadgets(): got unsupported gadget type!\");\n    }\n}\n\n// Must be checked after parameter resolution\nbool StrategyGraph::_check_strategy_constraints(Node& node, Arch* arch){\n    for( constraint_callback_t constr : node.strategy_constraints ){\n        if( ! 
constr(&node, this, arch))\n            return false;\n    }\n    return true;\n}\n\n// Must be checked after parameter resolution (padding resolution more precisely)\nbool StrategyGraph::_check_special_padding_constraints(Node& node, Arch* arch, Constraint* constraint){\n    if( !constraint )\n        return true;\n    for( ROPPadding& padd: node.special_paddings ){\n        if( !constraint->bad_bytes.is_valid_address(padd.value.value, arch->octets))\n            return false;\n    }\n    return true;\n}\n\n// Must be checked after gadget assignment\nbool StrategyGraph::_check_assigned_gadget_constraints(Node& node, Arch* arch){\n    for( constraint_callback_t constr : node.assigned_gadget_constraints ){\n        if( ! constr(&node, this, arch))\n            return false;\n    }\n    return true;\n}\n\n/* This function tries to find a gadget selection for a strategy graph.\n It iteratively (the order is the one of the DFS on parameter dependencies) resolves\n parameters and queries the database to find a matching gadget on each node of the\n strategy graph.  
\n*/\nbool StrategyGraph::select_gadgets(GadgetDB& db, Constraint* constraint, Arch* arch, int dfs_idx){\n    // Check if constraint is specified with an architecture\n    if( constraint && !arch){\n        throw runtime_exception(\"StrategyGraph::select_gadget(): should NEVER be called with a non-NULL constraint and a NULL arch\");\n    }\n\n    // Check if SIGINT\n    if( is_pending_sigint()){\n        return false;\n    }\n\n    // Otherwise do proper gadget selection : \n\n    // If root call\n    if( dfs_idx == -1 ){\n        compute_dfs_params();\n        compute_dfs_strategy();\n        params_ctx = VarContext(); // New context for params\n        has_gadget_selection = select_gadgets(db, constraint, arch, 0);\n        return has_gadget_selection;\n    }\n\n    if( dfs_idx >= dfs_params.size()){\n        return schedule_gadgets();\n    }\n\n    node_t n = dfs_params[dfs_idx];\n    Node& node = nodes[n];\n\n    // If the node is a disabled node, juste resolve the parameters\n    // and continue the selection \n    if( node.is_disabled){\n        _resolve_all_params(n);\n        // Continue to select from next node\n        if( select_gadgets(db, constraint, arch, dfs_idx+1) )\n                return true;\n        else\n                return false;\n    }\n\n    // 1. Try all possibilities for parameters\n    if( node.has_free_param() ){\n        // Get possible gadgets\n        PossibleGadgets* possible = _get_possible_gadgets(db, node.id);\n        // 2.a. 
Try all possible params\n        for( auto pos: possible->gadgets ){\n            // Update free params\n            for( int p = 0; p < node.nb_params(); p++){\n                if( node.params[p].is_free())\n                    node.params[p].value = pos.first[p];\n                if( node.params[p].is_cst())\n                    params_ctx.set(node.params[p].name, node.params[p].value);\n            }\n            // Resolve params again (useful for special paddings that depend\n            // on regular parameters such as offsets, etc)\n            _resolve_all_params(node.id);\n\n            // Check strategy constraints \n            if( !_check_strategy_constraints(node, arch) || !_check_special_padding_constraints(node, arch, constraint)){\n                continue;\n            }\n\n            // Prepare assertion for current parameter choice\n            node.apply_assertion();\n\n            // 2.b Try all possible gadgets\n            for( Gadget* gadget : *(pos.second) ){\n                if( ! node.assign_gadget(gadget, arch, constraint))\n                    continue;\n\n                // Resolve params once again (useful for special paddings that depend\n                // on gadget specific parameters such as gadget_addr, gadget_sp_inc, etc)\n                _resolve_all_params(node.id);\n\n                // Check assigned gadget constraints and global constraint\n                if( !_check_assigned_gadget_constraints(node, arch) || (constraint && !constraint->check(gadget, arch, &node.assertion))){\n                    continue;\n                }\n                // 3. 
Recursive call on next node \n                if( select_gadgets(db, constraint, arch, dfs_idx+1)){\n                    delete possible; possible = nullptr;\n                    return true;\n                }\n            }\n        }\n        delete possible; possible = nullptr;\n    }else{\n\n        // Check strategy constraints \n        if( _check_strategy_constraints(node, arch)){\n\n            // Get matching gadgets\n            const vector<Gadget*>& gadgets = _get_matching_gadgets(db, node.id);\n\n            // 2. Try all possible gadgets (or a subset)\n            for( Gadget* gadget : gadgets ){\n                if( ! node.assign_gadget(gadget, arch, constraint))\n                    continue;\n\n                // Resolve params again (useful for special paddings that depend\n                // on regular parameters such as offsets, etc)\n                _resolve_all_params(node.id);\n                \n                // Check if paddings have valid values (no bad bytes)\n                if( !_check_special_padding_constraints(node, arch, constraint))\n                    continue;\n\n                // Prepare assertion for current parameter choice\n                node.apply_assertion();\n\n                // Check assigned gadget constraints and global constraint\n                if( !_check_assigned_gadget_constraints(node, arch) || (constraint && !constraint->check(gadget, arch, &node.assertion))){\n                    continue;\n                }\n                // 3. Recursive call on next node\n                if( select_gadgets(db, constraint, arch, dfs_idx+1) ){\n                    return true;\n                }\n            }\n        }\n    }\n    return false;\n}\n\n/* ==================== Scheduling ======================= */\nvoid StrategyGraph::compute_interference_points(){\n    // Clear previous points if any\n    interference_points.clear();\n    \n    // 1. 
Compute interfering points for regs\n    for( Node& node : nodes ){\n        if( node.is_disabled ) // Allow indirect nodes though since they will be executed and interfere \n            continue;\n        for( int p = 0; p < node.nb_params(); p++ ){\n            Param& param = node.params[p];\n            if( ! param.is_data_link )\n                continue;\n            // Check if this link is modified by another node (other)\n            for( Node& other : nodes ){\n                // If disabled, indirect, or one part of the data link, ignore\n                if( other.is_disabled || param.depends_on(other.id) || other.id == node.id)\n                    continue;\n                if( modifies_reg(other.id, param.value, true)){ // True to check also mandatory_following_gadgets\n                    // Add interfering point\n                    if( node.is_initial_param(p) && !has_dependent_param(node.id, p)){\n                        // If the param is initial (input in the chain), other has to be after (can never be before)\n                        interference_points.push_back(InterferencePoint(other.id, -1, node.id));\n                    }else if( node.is_final_param(p)){\n                        // If the param is final (an output of the chain), other must be before\n                        interference_points.push_back(InterferencePoint(other.id, node.id, -1));\n                    }else{\n                        // Add the first param dependency (since it is for regs we assume there's only one dependency)\n                        interference_points.push_back(InterferencePoint(other.id, node.id, param.deps[0].node));\n                    }\n                }\n            }\n        }\n    }\n}\n\nbool StrategyGraph::_do_scheduling(int interference_idx){\n    bool success = false;\n    if( interference_idx == interference_points.size() ){\n        // All choices where made for interference edges, try to schedule\n        return compute_dfs_scheduling();\n 
   }else{\n        // Need to make a choice\n        InterferencePoint& inter = interference_points[interference_idx];\n        // Choice 1, put it BEFORE\n        if( inter.start_node != -1 ){\n            EdgeSet saved_edges = nodes[inter.interfering_node].interference_edges; // Save current edges state\n            add_interference_edge(inter.interfering_node, inter.start_node);\n            if( inter.end_node != -1 ){\n                add_interference_edge(inter.interfering_node, inter.end_node);\n            }\n            if( _do_scheduling(interference_idx+1) ){\n                success = true;\n            }\n            nodes[inter.interfering_node].interference_edges = saved_edges; // Restore edges state\n            if( success )\n                return true;\n        }\n\n        if( inter.end_node != -1 ){\n            EdgeSet saved_start_edges;\n            EdgeSet saved_end_edges = nodes[inter.end_node].interference_edges; // Save current edges state\n            // Choice 2, put if AFTER\n            if( inter.start_node != -1 ){\n                saved_start_edges = nodes[inter.start_node].interference_edges; // Save current edges state\n                add_interference_edge(inter.start_node, inter.interfering_node);\n            }\n            add_interference_edge( inter.end_node, inter.interfering_node);\n            if( _do_scheduling( interference_idx+1 ) ){\n                success = true;\n            }\n            \n            if( inter.start_node != -1 ){\n                nodes[inter.start_node].interference_edges = saved_start_edges; // Restore interference edges\n            }\n            nodes[inter.end_node].interference_edges = saved_end_edges; // Restore interference edges\n        }\n\n        return success;\n    }\n}\n\nbool StrategyGraph::schedule_gadgets(){\n    bool success = false;\n\n    // Compute inteference points\n    compute_interference_points();\n    // Go through all interference points and try both possibilities\n  
  // (interfering gadget goes BEFORE or AFTER both linked nodes)\n    success = _do_scheduling();\n    // Clean-up\n    interference_points.clear();\n    // Return\n    return success;\n}\n\n\n/* Function that builds a ROPChain from a valid gadget selection\n   ==> If no valid selection has been computed for the graph, it \n       returns a NULL pointer\n*/\nROPChain* StrategyGraph::get_ropchain(Arch* arch, Constraint* constraint){\n    vector<node_t>::reverse_iterator rit;\n    cst_t default_padding;\n    ROPPadding padding;\n    int padding_num = -1;\n\n    // Check if there is a selection in the nodes\n    if( !has_gadget_selection ){\n        return nullptr;\n    }\n\n    // Get default padding (validate against bad_bytes if constraint specified)\n    default_padding = constraint ? constraint->bad_bytes.get_valid_padding(arch->octets) : cst_sign_trunc(arch->bits, -1);\n\n    ROPChain* ropchain = new ROPChain(arch);\n    for( rit = dfs_scheduling.rbegin(); rit != dfs_scheduling.rend(); rit++ ){\n        Node& node = nodes[*rit];\n        if( node.is_indirect ){\n            continue; // Skip indirect nodes\n        }\n\n        // Add gadget\n        ropchain->add_gadget(node.params[node.get_param_num_gadget_addr()].value, node.affected_gadget);\n\n        // Order paddings by offset\n        std::sort(nodes[*rit].special_paddings.begin(), nodes[*rit].special_paddings.end(), \n            [](const ROPPadding& padd1, const ROPPadding& padd2){\n                return padd1.offset.value < padd2.offset.value;\n                });\n\n        // Init padding iterator\n        if( !node.special_paddings.empty()){\n            padding = node.special_paddings[0];\n            padding_num = 0;\n        }\n\n        // Get number of paddings depending on sp_inc\n        int nb_paddings = node.affected_gadget->sp_inc / arch->octets;\n        if( node.affected_gadget->branch_type == BranchType::RET ){\n            nb_paddings--;\n        }\n        // Check number of 
paddings according to special paddings\n        if( !node.special_paddings.empty() && \n                (node.special_paddings.back().offset.value/arch->octets)+1 > nb_paddings){\n            nb_paddings = (node.special_paddings.back().offset.value/arch->octets)+1;\n        }\n\n        for( int offset = 0; offset < nb_paddings*arch->octets; offset += arch->octets){\n            // If special padding\n            if( padding_num != -1 && padding.offset.value == offset ){\n                // If the padding is a gadget address (indirect gadget), add a info msg\n                string msg = \"\";\n                if( padding.value.is_dependent() && padding.value.deps[0].param_type == nodes[padding.value.deps[0].node].get_param_num_gadget_addr()){\n                    msg = nodes[padding.value.deps[0].node].affected_gadget->asm_str;\n                    ropchain->add_gadget_address(cst_sign_trunc(arch->bits, padding.value.value), msg);\n                }else{\n                    ropchain->add_padding(cst_sign_trunc(arch->bits, padding.value.value), msg);\n                }\n\n                // Step to next special padding (if any)\n                if( padding_num == node.special_paddings.size()-1 ){\n                    // No more special paddings\n                    padding_num = -1;\n                }else{\n                    // Next special padding\n                    padding = nodes[*rit].special_paddings[++padding_num];\n                }                \n            }\n            // Else default padding\n            else{\n                ropchain->add_padding(default_padding);\n            }\n        }\n    }\n    return ropchain;\n}\n\nStrategyGraph* StrategyGraph::copy(){\n    StrategyGraph* new_graph = new StrategyGraph();\n    // Copy nodes\n    new_graph->nodes = nodes;\n    // Copy name generator (to avoid create new names for 0 that colision with previous ones)\n    new_graph->name_generator = name_generator;\n    new_graph->_history = _history;\n   
 return new_graph;\n}\n\n/* ================ Printing =================== */\nostream& operator<<(ostream& os, Param& param){\n    string tab = \"\\t\";\n    os << tab << \"Param:\" << std::endl;\n    os << tab << \"\\t Value: \" << std::dec << param.value << std::endl;\n    os << tab << \"\\t Fixed?: \" << param.is_fixed << std::endl;\n    os << tab << \"\\t Is data link?: \" << param.is_data_link << std::endl;\n    os << tab << \"\\t Depends on : \" << std::endl;\n    for( ParamDep& dep : param.deps){\n        os << tab << \"\\t\\t Node: \" << dep.node << \"  Param: \" << dep.param_type << std::endl;\n    }\n    if( param.expr != nullptr )\n        os << tab << \"\\t Expr: \" << param.expr << std::endl;\n    if( !param.name.empty())\n        os << tab << \"\\t Name: \" << param.name << std::endl;\n    return os;\n}\n\nostream& operator<<(ostream& os, Node& node){\n    os << \"Node \" << std::dec << node.id << \":\";\n    if( node.is_disabled )\n        os << \" ( disabled ) \";\n    if( node.is_indirect )\n        os << \" ( indirect ) \";\n    if( node.affected_gadget != nullptr )\n        os << \"\\n\\tAffected gadget:  \" << node.affected_gadget->asm_str;\n    os << \"\\n\\tGadget type:  \" << (int)node.type;\n    os << \"\\n\\tBranch type:  \" << (int)node.branch_type;\n    os << \"\\n\\tIncoming strategy edges: \";\n    for( node_t n : node.strategy_edges.in )\n        os << n << \"  \";\n    os << \"\\n\\tOutgoing strategy edges: \";\n    for( node_t n : node.strategy_edges.out )\n        os << n << \"  \";\n    os << \"\\n\\tIncoming param edges: \";\n    for( node_t n : node.param_edges.in )\n        os << n << \"  \";\n    os << \"\\n\\tOutgoing param edges: \";\n    for( node_t n : node.param_edges.out )\n        os << n << \"  \";\n    \n    os << \"\\n\\tParams: \\n\";\n    for( int p = 0; p < node.nb_params(); p++){\n        os << node.params[p] << std::endl;\n    }\n    \n    os << \"\\n\\tSpecial paddings: \\n\";\n    for( ROPPadding& padding : 
node.special_paddings){\n        os << \"offset: \" << padding.offset << \", value: \" << padding.value << std::endl;\n    }\n\n    os << std::endl;\n    return os;\n}\n\nostream& operator<<(ostream& os, StrategyGraph& graph){\n    os << \"STRATEGY GRAPH\\n==============\";\n    \n    os << \"\\n\\t History: \" << graph._history;\n    \n    os << \"\\n\\tDFS strategy: \"; \n    for( node_t n : graph.dfs_strategy ){\n        os << n << \" \";\n    }\n    \n    os << \"\\n\\tDFS params: \"; \n    for( node_t n : graph.dfs_params ){\n        os << n << \" \";\n    }\n    \n    os << std::endl;\n    for( Node& n : graph.nodes ){\n        os << n;\n    }\n\n    return os;\n}\n"
  },
  {
    "path": "libropium/compiler/strategy_rules.cpp",
    "content": "#include \"strategy.hpp\"\n#include \"expression.hpp\"\n#include \"exception.hpp\"\n#include <algorithm>\n\n/* =============== Strategy Rules ============== */\n\n/* MovXXX dst_reg, src_xxx\n * =======================\n * (n1) MovReg R1, src_xxx\n * (n2) MovReg dst_reg, R1 \n * ======================= */\nbool StrategyGraph::rule_generic_transitivity(node_t n){\n    \n    int i = 0;\n\n    if( nodes[n].type != GadgetType::MOV_CST &&\n        nodes[n].type != GadgetType::MOV_REG && \n        nodes[n].type != GadgetType::AMOV_CST && \n        nodes[n].type != GadgetType::AMOV_REG &&\n        nodes[n].type != GadgetType::LOAD &&\n        nodes[n].type != GadgetType::ALOAD ){\n        return false;\n    }\n    \n    // Get/Create nodes\n    node_t n1 = new_node(nodes[n].type);\n    node_t n2 = new_node(GadgetType::MOV_REG);\n    Node& node = nodes[n];\n    Node& node1 = nodes[n1];\n    Node& node2 = nodes[n2];\n    \n    \n    node1 = node; // Copy node to node1\n    node1.id = n1; // But keep id\n    // Modify dst_reg\n    node1.params[node1.get_param_num_dst_reg()].make_reg(node2.id, PARAM_MOVREG_SRC_REG);\n    \n    // Set node2 with the reg transitivity gadget\n    node2.params[PARAM_MOVREG_SRC_REG].make_reg(-1, false); // free reg\n    node2.params[PARAM_MOVREG_DST_REG] = node.params[node.get_param_num_dst_reg()]; // Same dst reg as initial query in node\n    \n    // Add data link between node 1 and 2 for the transitive reg\n    node1.params[node1.get_param_num_dst_reg()].is_data_link = true;\n\n    // Node1 must end with a ret\n    node1.branch_type = BranchType::RET;\n    // Node2 same as node\n    node2.branch_type = node.branch_type;\n\n    // Redirect dst reg (to node2) (and datalink of course)\n    redirect_param_edges(node.id, node.get_param_num_dst_reg(), node2.id, node2.get_param_num_dst_reg());\n    redirect_param_edges(node.id, node.get_param_num_data_link(), node2.id, node2.get_param_num_data_link());\n    redirect_param_edges(node.id, 
node.get_param_num_gadget_jmp_reg(), node2.id, node2.get_param_num_gadget_jmp_reg()); // If JMP\n    redirect_param_edges(node.id, node.get_param_num_gadget_sp_inc(), node2.id, node2.get_param_num_gadget_sp_inc()); // If JMP\n    redirect_param_edges(node.id, node.get_param_num_gadget_sp_delta(), node2.id, node2.get_param_num_gadget_sp_delta()); // If JMP\n\n    // Redirect other input params arcs from node to node1\n    for( i = 0; i < MAX_PARAMS; i++){\n        redirect_param_edges(node.id, i, node1.id, i);\n    }\n\n    // Update param edges\n    update_param_edges();\n\n    // Redirect/add strategy edges\n    add_strategy_edge(node1.id, node2.id);\n    redirect_incoming_strategy_edges(node.id, node1.id);\n    redirect_outgoing_strategy_edges(node.id, node2.id);\n    \n    // Disable previous node\n    disable_node(node.id);\n    \n    // Update size in the end\n    update_size();\n    \n    // Update graph history\n    stringstream ss;\n    ss << _history << \"generic_transitivity(\" << std::dec << n << \")\" << std::endl;\n    _history = ss.str();\n\n    return true;\n}\n\n/* MovCst dst_reg, src_cst\n+ * =======================\n+ * Load dst_reg, mem(SP + off)\n+ * padding(off, src_cst) \n+ * ======================= */\nbool StrategyGraph::rule_mov_cst_pop(node_t n, Arch* arch){\n    \n    if( nodes[n].type != GadgetType::MOV_CST ){\n        return false;\n    }\n\n    if( nodes[n].branch_type == BranchType::JMP ){\n        return false;\n    }\n\n    // Get/Create nodes\n    node_t n1 = new_node(GadgetType::LOAD);\n    Node& node = nodes[n];\n    Node& node1 = nodes[n1];\n\n    // Node1 must have same return type than node\n    node1.branch_type = node.branch_type;\n\n    // Node1 must have same special paddings than node\n    node1.special_paddings = node.special_paddings;\n\n    // Modify parameters\n    node1.params[PARAM_LOAD_DST_REG] = node.params[PARAM_MOVCST_DST_REG];\n    node1.params[PARAM_LOAD_SRC_ADDR_REG].make_reg(arch->sp());\n    
node1.params[PARAM_LOAD_SRC_ADDR_OFFSET].make_cst(-1, new_name(\"stack_offset\"), false); // Free offset\n\n    // Set special padding at SP  offset to put the constant\n    node1.special_paddings.push_back(ROPPadding());\n    // Offset is the offset at which we pop the constant\n    node1.special_paddings.back().offset.make_cst(n1, PARAM_LOAD_SRC_ADDR_OFFSET, exprvar(arch->bits, node1.params[PARAM_LOAD_SRC_ADDR_OFFSET].name), new_name(\"padding_offset\"));\n    // Padding value is just the constant\n    node1.special_paddings.back().value = node.params[PARAM_MOVCST_SRC_CST];\n    node1.special_paddings.back().value.name = new_name(\"padding_value\"); // Get a new name for the value parameter\n\n    // Add a constraint: the offset of the pop must not be too big (max 240)\n    node1.strategy_constraints.push_back(\n        [](Node* node, StrategyGraph* graph, Arch* arch){\n            return node->params[PARAM_LOAD_SRC_ADDR_OFFSET].value < 240 &&\n                   node->params[PARAM_LOAD_SRC_ADDR_OFFSET].value >= 0;\n        }\n    );\n\n    // Add a constraint: the offset of the pop must not be bigger than the sp inc\n    // and if the gadget is RET then the pop must not correspond to the return address ! 
\n    node1.assigned_gadget_constraints.push_back(\n        [](Node* node, StrategyGraph* graph, Arch* arch){\n            int adjust=0;\n            // Padding can't overlap return address unless the register we want\n            // to set is PC ;)\n            if( node->affected_gadget->branch_type == BranchType::RET && \n                node->params[PARAM_LOAD_DST_REG].value != arch->pc()){\n                adjust = arch->octets; //\n            }\n            return node->params[PARAM_LOAD_SRC_ADDR_OFFSET].value < node->affected_gadget->sp_inc - adjust;\n        }\n    );\n\n    // Redirect the different params and edges\n    // Generic params\n    redirect_generic_param_edges(node.id, node1.id);\n    redirect_param_edges(node.id, node.get_param_num_data_link(), node1.id, node1.get_param_num_data_link());\n\n    // Redirect strategy edges\n    redirect_incoming_strategy_edges(node.id, node1.id);\n    redirect_outgoing_strategy_edges(node.id, node1.id);\n\n    // Update param edges\n    update_param_edges();\n\n    // Disable node\n    disable_node(node.id);\n    \n    // Update size in the end\n    update_size();\n    \n    // Update graph history\n    stringstream ss;\n    ss << _history << \"mov_cst_pop(\" << std::dec << n << \")\" << std::endl;\n    _history = ss.str();\n    \n    return true;\n}\n\n/* <Any type>; ret\n * ====================\n * R1 <- @(next gadget)\n * <Any type>; jmp R1; \n */\nbool StrategyGraph::rule_generic_adjust_jmp(node_t n, Arch* arch){\n    // Only apply on \"RET\" nodes (and \"ANY\" by extension)\n    if( nodes[n].branch_type != BranchType::RET &&\n        nodes[n].branch_type != BranchType::ANY ){\n        return false;\n    }\n    \n    // Get/Create nodes\n    node_t n1 = new_node(GadgetType::MOV_CST); // Node to adjust the jmp reg\n    node_t n_ret = new_node(GadgetType::LOAD); // Node of the 'adjust gadget' (ret N basically)\n    Node& node = nodes[n];\n    Node& node1 = nodes[n1];\n    Node& node_ret = nodes[n_ret];\n       
 \n    // Change return type to JMP in node\n    node.branch_type = BranchType::JMP;\n    // Node MUST be followed by the indirect gadget ;)\n    node.mandatory_following_node = node_ret.id;\n    // Node1 (set the jmp reg) MUST be a RET one\n    node1.branch_type = BranchType::RET;\n\n    // Set the 'adjust gadget' node. It must adjust the PC that the next value on the\n    // stack after the 'jmp' gadget is executed\n    node_ret.params[PARAM_LOAD_DST_REG].make_reg(arch->pc()); // Dest reg is PC\n    node_ret.params[PARAM_LOAD_SRC_ADDR_REG].make_reg(arch->sp()); // addr reg is SP (pop from the stack)\n    node_ret.params[PARAM_LOAD_SRC_ADDR_OFFSET].make_cst( n, node.get_param_num_gadget_sp_delta(), nullptr, new_name(\"adjust_jmp_offset\"));\n    node_ret.is_indirect = true; // This node is 'indirect' (gadget not added explicitely on the stack)\n\n    // Set the 'pre-jmp' gadget. It sets the jmp reg to the address of the 'adjust gadget'.\n    // Dest reg of node1 is the jmp reg of node\n    node1.params[PARAM_MOVCST_DST_REG].make_reg(n, node.get_param_num_gadget_jmp_reg());\n    // Src cst of node1 is the address of the adjust gadget\n    node1.params[PARAM_MOVCST_SRC_CST].make_cst(node_ret.id, node_ret.get_param_num_gadget_addr(), nullptr, new_name(\"adjust_jmp_addr\"));\n    // Add data link between node1 and node (the jmp reg must NOT be clobbered after it was set to \n    // point to the adjust gadget\n    node1.params[PARAM_MOVCST_DST_REG].is_data_link = true;\n\n    // Redirect strategy edges\n    redirect_incoming_strategy_edges(node.id, node1.id);\n    add_strategy_edge(node1.id, node.id); // Add after so it's not redirected ;)\n    \n    // Add callback that checks that the jmp reg is not implied in the operation\n    // for example if the gadget is mov eax,ebx; jmp ebx; and we adjust ebx then\n    // the semantics are corrupted because ebx's value will be overwritten with \n    // the address of the 'adjust-gadget'\n    
node.assigned_gadget_constraints.push_back(\n        [](Node* node, StrategyGraph* graph, Arch* arch){\n            switch( node->type ){\n                case GadgetType::MOV_CST:\n                    return node->params[node->get_param_num_gadget_jmp_reg()].value != node->params[PARAM_MOVCST_DST_REG].value;\n                case GadgetType::MOV_REG:\n                    return  node->params[node->get_param_num_gadget_jmp_reg()].value != node->params[PARAM_MOVREG_DST_REG].value && \n                            node->params[node->get_param_num_gadget_jmp_reg()].value != node->params[PARAM_MOVREG_SRC_REG].value;\n                case GadgetType::AMOV_CST:\n                    return  node->params[node->get_param_num_gadget_jmp_reg()].value != node->params[PARAM_AMOVCST_DST_REG].value && \n                            node->params[node->get_param_num_gadget_jmp_reg()].value != node->params[PARAM_AMOVCST_SRC_REG].value;\n                case GadgetType::AMOV_REG:\n                    return  node->params[node->get_param_num_gadget_jmp_reg()].value != node->params[PARAM_AMOVREG_DST_REG].value && \n                            node->params[node->get_param_num_gadget_jmp_reg()].value != node->params[PARAM_AMOVREG_SRC_REG1].value &&\n                            node->params[node->get_param_num_gadget_jmp_reg()].value != node->params[PARAM_AMOVREG_SRC_REG2].value;\n                case GadgetType::LOAD:\n                    return  node->params[node->get_param_num_gadget_jmp_reg()].value != node->params[PARAM_LOAD_DST_REG].value && \n                            node->params[node->get_param_num_gadget_jmp_reg()].value != node->params[PARAM_LOAD_SRC_ADDR_REG].value;\n                case GadgetType::ALOAD:\n                    return  node->params[node->get_param_num_gadget_jmp_reg()].value != node->params[PARAM_ALOAD_DST_REG].value && \n                            node->params[node->get_param_num_gadget_jmp_reg()].value != node->params[PARAM_ALOAD_SRC_ADDR_REG].value;\n       
         case GadgetType::STORE:\n                    return  node->params[node->get_param_num_gadget_jmp_reg()].value != node->params[PARAM_STORE_SRC_REG].value && \n                            node->params[node->get_param_num_gadget_jmp_reg()].value != node->params[PARAM_STORE_DST_ADDR_REG].value;\n                case GadgetType::ASTORE:\n                    return  node->params[node->get_param_num_gadget_jmp_reg()].value != node->params[PARAM_ASTORE_SRC_REG].value && \n                            node->params[node->get_param_num_gadget_jmp_reg()].value != node->params[PARAM_ASTORE_DST_ADDR_REG].value;\n                default:\n                    throw runtime_exception(\"rule_generic_adjust_jmp(): constraint callback got unsupported GadgetType! \");\n            }\n        }\n    );\n\n    // Update param edges\n    update_param_edges();\n\n    // Update size in the end\n    update_size();\n\n    // Update graph history\n    stringstream ss;\n    ss << _history << \"generic_adjust_jmp(\" << std::dec << n << \")\" << std::endl;\n    _history = ss.str();\n\n    return true;\n}\n\n/* dst_reg <-- mem(src_addr_reg +  src_addr_offset)\n * =======================\n * (n1) MovReg R1, src_addr_reg + (src_addr_offset - C1)\n * (n2) dst_reg <-- mem(R1 + C1)\n * ======================= */\nbool StrategyGraph::rule_adjust_load(node_t n, Arch* arch){\n\n    if( nodes[n].type != GadgetType::LOAD &&\n        nodes[n].type != GadgetType::ALOAD ){\n        return false;\n    }\n    \n    if( nodes[n].params[nodes[n].get_param_num_src_addr_reg()].value == arch->sp()){\n        // If we want to read from the stack pointer (typically to pop a value), don't\n        // apply this strategy\n        return false;\n    }\n\n    // Get/Create nodes\n    node_t n1 = new_node(GadgetType::AMOV_CST);\n    node_t n2 = new_node(nodes[n].type);\n    Node& node = nodes[n];\n    Node& node1 = nodes[n1];\n    Node& node2 = nodes[n2];\n    \n    \n    node2 = node; // Copy node to node2\n    
node2.id = n2; // But keep id\n    // SET NODE 2\n    // Modify src_addr_reg to be any register\n    node2.params[node2.get_param_num_src_addr_reg()].make_reg(-1, false); // free reg\n    // Make the offset also free\n    node2.params[node2.get_param_num_src_addr_offset()].make_cst(0, new_name(\"addr_offset\"), false); // free cst\n\n    // SET NODE 1\n    node1.params[PARAM_AMOVCST_SRC_OP].make_op(Op::ADD);\n    // Set node1 with the right reg and cst\n    node1.params[PARAM_AMOVCST_DST_REG].make_reg(node2.id,node2.get_param_num_src_addr_reg()); // depends on the load src addr reg\n    node1.params[PARAM_AMOVCST_SRC_REG] = node.params[node.get_param_num_src_addr_reg()]; // Reg should be set with the same reg of the initial LOAD\n    // Cst must be the original offset (of node) minus the new one (of node2)\n    Param& node_offset = node.params[node.get_param_num_src_addr_offset()];\n    Param& node2_offset = node2.params[node2.get_param_num_src_addr_offset()];\n    Expr src_cst_expr = exprvar(arch->bits, node_offset.name) \n                        - exprvar(arch->bits, node2_offset.name);\n    node1.params[PARAM_AMOVCST_SRC_CST].make_cst(node2.id, node2.get_param_num_src_addr_offset(), \n            src_cst_expr, new_name(\"addr_offset\"));\n    node1.params[PARAM_AMOVCST_SRC_CST].add_dep(node.id, node.get_param_num_src_addr_offset());\n\n    // Add data link between node 1 and 2 for the address reg\n    node1.params[PARAM_AMOVCST_DST_REG].is_data_link = true;\n\n    // Node1 must end with a ret\n    node1.branch_type = BranchType::RET;\n    // Node2 same as node\n    node2.branch_type = node.branch_type;\n\n    // Redirect input params arcs from node to node1\n    redirect_param_edges(node.id, node.get_param_num_dst_reg(), \n                                  node2.id, node2.get_param_num_dst_reg());\n\n    // Redirect data_link to node2\n    redirect_param_edges(node.id, node.get_param_num_data_link(), node2.id, node2.get_param_num_data_link());\n\n    // 
Redirect/add strategy edges\n    add_strategy_edge(node1.id, node2.id);\n    redirect_incoming_strategy_edges(node.id, node1.id);\n    redirect_outgoing_strategy_edges(node.id, node2.id);\n\n    // Update param edges\n    update_param_edges();\n\n    // Disable previous node\n    disable_node(node.id);\n\n    // Update size in the end\n    update_size();\n\n    // Update graph history\n    stringstream ss;\n    ss << _history << \"adjust_load(\" << std::dec << n << \")\" << std::endl;\n    _history = ss.str();\n\n    return true;\n}\n\n/* <AnyType> dst, src_reg\n * =======================\n * (n2) MovReg R1, src_reg\n * (n1) <AnyType> dst, R1\n * ======================= */\nbool StrategyGraph::rule_generic_src_transitivity(node_t n){\n\n    if( nodes[n].type != GadgetType::STORE &&\n        nodes[n].type != GadgetType::ASTORE ){\n        return false;\n    }\n    \n    // Don't apply this strategy if the src reg is already free no sense to add a \n    // transitivy step\n    if( nodes[n].params[nodes[n].get_param_num_src_reg()].is_free()){\n        return false;\n    }\n\n    // Get/Create nodes\n    node_t n2 = new_node(GadgetType::MOV_REG);\n    Node& node = nodes[n];\n    Node& node2 = nodes[n2];\n\n    // Redirect parameter to src_reg\n    redirect_param_edges(node.id, node.get_param_num_src_reg(), node2.id, node2.get_param_num_src_reg());\n\n    // Set node2 with the reg transitivity gadget\n    node2.params[PARAM_MOVREG_DST_REG].make_reg(node.id, node.get_param_num_src_reg()); // Depends on node src reg\n    node2.params[PARAM_MOVREG_SRC_REG] = node.params[node.get_param_num_src_reg()]; // Same src reg as initial query in node\n    // Add data link between node 1 and 2 for the transitive reg\n    node2.params[PARAM_MOVREG_DST_REG].is_data_link = true;\n    // Node2 must end in ret\n    node2.branch_type = BranchType::RET;\n\n    // Modify src_reg to make it free\n    node.params[node.get_param_num_src_reg()].make_reg(-1, false); // Free\n\n    // Update param 
edges\n    update_param_edges();\n\n    // Redirect/add strategy edges\n    add_strategy_edge(node2.id, node.id);\n\n    // Update size in the end\n    update_size();\n    \n    // Update graph history\n    stringstream ss;\n    ss << _history << \"generic_src_transitivity(\" << std::dec << n << \")\" << std::endl;\n    _history = ss.str();\n    \n    return true;\n}\n\n/* mem(dst_addr_reg, dst_addr_offset) <-- src_reg\n * =======================\n * (n1) MovReg R1, dst_addr_reg + (dst_addr_offset - C1)\n * (n2) mem(R1 + C1) <-- src_reg\n * ======================= */\nbool StrategyGraph::rule_adjust_store(node_t n, Arch* arch){\n\n    if( nodes[n].type != GadgetType::STORE &&\n        nodes[n].type != GadgetType::ASTORE ){\n        return false;\n    }\n    \n    // If we want to store at the stack pointer, don't apply this strategy\n    if( nodes[n].params[nodes[n].get_param_num_dst_addr_reg()].value == arch->sp()){\n        return false;\n    }\n    \n    // If the parameters are free then it doesn't make sense to adjust it\n    if( nodes[n].params[nodes[n].get_param_num_dst_addr_reg()].is_free() && \n        nodes[n].params[nodes[n].get_param_num_dst_addr_offset()].is_free()){\n        return false;\n    }\n\n    // Get/Create nodes\n    node_t n1 = new_node(GadgetType::AMOV_CST);\n    node_t n2 = new_node(nodes[n].type);\n    Node& node = nodes[n];\n    Node& node1 = nodes[n1];\n    Node& node2 = nodes[n2];\n\n    // SET NODE 2\n    node2 = node; // Copy node to node2\n    node2.id = n2; // But keep id\n    // Modify dst_addr_reg to be any register\n    node2.params[node2.get_param_num_dst_addr_reg()].make_reg(-1, false); // free reg\n    // Make the offset also free\n    node2.params[node2.get_param_num_dst_addr_offset()].make_cst(0, new_name(\"addr_offset\"), false); // free cst\n\n    // SET NODE 1\n    node1.params[PARAM_AMOVCST_SRC_OP].make_op(Op::ADD);\n    // Set node1 with the right reg and cst\n    
node1.params[PARAM_AMOVCST_DST_REG].make_reg(node2.id,node2.get_param_num_dst_addr_reg()); // depends on the store dst addr reg\n    node1.params[PARAM_AMOVCST_SRC_REG] = node.params[node.get_param_num_dst_addr_reg()]; // Reg should be set with the same reg of the initial STORE\n    // Cst must be the original offset (of node) minus the new one (of node2)\n    Param& node_offset = node.params[node.get_param_num_dst_addr_offset()];\n    Param& node2_offset = node2.params[node2.get_param_num_dst_addr_offset()];\n    Expr src_cst_expr = exprvar(arch->bits, node_offset.name) \n                        - exprvar(arch->bits, node2_offset.name);\n    node1.params[PARAM_AMOVCST_SRC_CST].make_cst(node2.id, node2.get_param_num_dst_addr_offset(), \n            src_cst_expr, new_name(\"addr_offset\"));\n    node1.params[PARAM_AMOVCST_SRC_CST].add_dep(node.id, node.get_param_num_dst_addr_offset());\n\n    // Add data link between node 1 and 2 for the address reg\n    node1.params[PARAM_AMOVCST_DST_REG].is_data_link = true;\n\n    // Node1 must end with a ret\n    node1.branch_type = BranchType::RET;\n    // Node2 same as node\n    node2.branch_type = node.branch_type;\n\n    // Redirect input params arcs from node to node1\n    redirect_param_edges(node.id, node.get_param_num_dst_addr_reg(), \n                                  node2.id, node2.get_param_num_dst_addr_reg());\n    redirect_param_edges(node.id, node.get_param_num_src_reg(), \n                                  node2.id, node2.get_param_num_src_reg());\n\n    // Redirect data_link\n    redirect_param_edges(node.id, node.get_param_num_data_link(), node2.id, node2.get_param_num_data_link());\n\n    // Redirect/add strategy edges\n    add_strategy_edge(node1.id, node2.id);\n    redirect_incoming_strategy_edges(node.id, node1.id);\n    redirect_outgoing_strategy_edges(node.id, node2.id);\n\n    // Update param edges\n    update_param_edges();\n\n    // Disable previous node\n    disable_node(node.id);\n\n    // Update 
size in the end\n    update_size();\n\n    // Update graph history\n    stringstream ss;\n    ss << _history << \"adjust_store(\" << std::dec << n << \")\" << std::endl;\n    _history = ss.str();\n\n    return true;\n}\n"
  },
  {
    "path": "libropium/compiler/systems.cpp",
    "content": "#include \"systems.hpp\"\n#include <vector>\n\nusing std::vector;\n\n// Supported syscalls\nvector<SyscallDef> linux_x86_syscalls = {\n    SyscallDef(\"exit\", 1, 1),\n    SyscallDef(\"fork\", 2, 1),\n    SyscallDef(\"read\", 3, 3),\n    SyscallDef(\"write\", 4, 3),\n    SyscallDef(\"open\", 5, 3),\n    SyscallDef(\"close\", 6, 2),\n    SyscallDef(\"waitpid\", 7, 3),\n    SyscallDef(\"creat\", 8, 2),\n    SyscallDef(\"link\", 9, 2),\n    SyscallDef(\"unlink\", 10, 1),\n    SyscallDef(\"execve\", 11, 3),\n    SyscallDef(\"chdir\", 12, 1),\n    SyscallDef(\"time\", 13, 1),\n    SyscallDef(\"mknod\", 14, 3),\n    SyscallDef(\"chmod\", 15, 2),\n    SyscallDef(\"lchown\", 16, 2),\n    SyscallDef(\"stat\", 18, 2),\n    SyscallDef(\"lseek\", 19, 3),\n    SyscallDef(\"getpid\", 20, 0),\n    SyscallDef(\"mount\", 21, 3),\n    SyscallDef(\"umount\", 22, 1),\n    SyscallDef(\"setuid\", 23, 1),\n    SyscallDef(\"getuid\", 24, 0),\n    SyscallDef(\"stime\", 25, 1),\n    SyscallDef(\"ptrace\", 26, 4),\n    SyscallDef(\"alarm\", 27, 1),\n    SyscallDef(\"pause\", 29, 0),\n    SyscallDef(\"access\", 33, 2),\n    SyscallDef(\"sync\", 36, 0),\n    SyscallDef(\"kill\", 37, 2),\n    SyscallDef(\"rename\", 38, 2),\n    SyscallDef(\"mkdir\", 39, 2),\n    SyscallDef(\"rmdir\", 40, 1),\n    SyscallDef(\"dup\", 41, 1),\n    SyscallDef(\"umount2\", 52, 2),\n    SyscallDef(\"setpgid\", 57, 2),\n    SyscallDef(\"chroot\", 61, 1),\n    SyscallDef(\"sigaction\", 67, 3),\n    SyscallDef(\"symlink\", 83, 2),\n    SyscallDef(\"reboot\", 88, 4),\n    SyscallDef(\"mmap\", 90, 6),\n    SyscallDef(\"munmap\", 91, 2),\n    SyscallDef(\"uname\", 109, 1),\n    SyscallDef(\"mprotect\", 125, 3),\n    SyscallDef(\"sysctl\", 149, 1),\n    SyscallDef(\"setreuid\", 203, 2),\n    SyscallDef(\"setregid\", 204, 2),\n    SyscallDef(\"setuid32\", 213, 1),\n    SyscallDef(\"setgid\", 214, 1)\n};\n\nvector<SyscallDef> linux_x64_syscalls = {\n    SyscallDef(\"read\", 0, 3),\n    SyscallDef(\"write\", 1, 
3),\n    SyscallDef(\"open\", 2, 3),\n    SyscallDef(\"close\", 3, 2),\n    SyscallDef(\"mmap\", 9, 6),\n    SyscallDef(\"mprotect\", 10, 3),\n    SyscallDef(\"munmap\", 11, 2),\n    SyscallDef(\"rt_sigaction\", 14, 4),\n    SyscallDef(\"rt_sigreturn\", 15, 1),\n    SyscallDef(\"access\", 21, 2),\n    SyscallDef(\"mremap\", 25, 5),\n    SyscallDef(\"pause\", 34, 0),\n    SyscallDef(\"alarm\", 37, 1),\n    SyscallDef(\"getpid\", 39, 0),\n    SyscallDef(\"connect\", 42, 3),\n    SyscallDef(\"accept\", 43, 3),\n    SyscallDef(\"sendto\", 44, 5),\n    SyscallDef(\"recvfrom\", 45, 5),\n    SyscallDef(\"shutdown\", 48, 2),\n    SyscallDef(\"bind\", 49, 3),\n    SyscallDef(\"listen\", 50, 2),\n    SyscallDef(\"execve\", 59, 3),\n    SyscallDef(\"exit\", 60, 1),\n    SyscallDef(\"kill\", 62, 2),\n    SyscallDef(\"uname\", 63, 1),\n    SyscallDef(\"mkdir\", 83, 2),\n    SyscallDef(\"rmdir\", 84, 1),\n    SyscallDef(\"creat\", 85, 2),\n    SyscallDef(\"link\", 86, 2),\n    SyscallDef(\"unlink\", 87, 1),\n    SyscallDef(\"chmod\", 90, 2),\n    SyscallDef(\"chown\", 92, 3),\n    SyscallDef(\"ptrace\", 101, 4),\n    SyscallDef(\"getuid\", 102, 0),\n    SyscallDef(\"getgid\", 104, 0),\n    SyscallDef(\"setuid\", 105, 1),\n    SyscallDef(\"setgid\", 106, 1),\n    SyscallDef(\"setreuid\", 113, 2),\n    SyscallDef(\"setregid\", 114, 2),\n    SyscallDef(\"chroot\", 161, 1),\n    SyscallDef(\"mount\", 165, 5),\n    SyscallDef(\"umount2\", 166, 2),\n    SyscallDef(\"reboot\", 169, 4)\n};\n\nSyscallDef* get_syscall_def(ArchType arch, System sys, string name){\n    vector<SyscallDef>* list = nullptr;\n    if( arch == ArchType::X86 ){\n        switch( sys ){\n            case System::LINUX: list = &linux_x86_syscalls; break;\n            default: throw runtime_exception(\"get_syscall_def(): got unsupported system for arch X86\");\n        }\n    }else if( arch == ArchType::X64 ){\n        switch( sys ){\n            case System::LINUX: list = &linux_x64_syscalls; break;\n            
default: throw runtime_exception(\"get_syscall_def(): got unsupported system for arch X64\");\n        }\n    }else{\n        throw runtime_exception(\"get_syscall_def(): got unknown arch\");\n    }\n    // Find syscall in list\n    for( SyscallDef& def : *list ){\n        if( def.name == name )\n            return &def; // Found\n    }\n    return nullptr; // Not found\n}\n"
  },
  {
    "path": "libropium/database/database.cpp",
    "content": "#include \"database.hpp\"\n#include \"exception.hpp\"\n#include <iostream>\n\nint find_insert_index(vector<Gadget*>& gadget_list, Gadget* gadget){\n    int count= gadget_list.size(); \n    int first = 0; \n    int curr;\n    while(count > 0){\n        curr = first;\n        curr += count/2;\n        if( gadget_list.at(curr)->lthan(*gadget)){\n            first = curr+1;\n            count -= count/2 + 1;\n        }else{\n            count = count/2;\n        }\n    }\n    return first; \n}\n\n\nint find_insert_index_possible_gadgets(PossibleGadgets* possible, Gadget* gadget){\n    int count= possible->gadgets.size(); \n    int first = 0; \n    int curr;\n    while(count > 0){\n        curr = first;\n        curr += count/2;\n        if( possible->gadgets.at(curr).second->at(0)->lthan(*gadget)){\n            first = curr+1;\n            count -= count/2 + 1;\n        }else{\n            count = count/2;\n        }\n    }\n    return first; \n}\n\ngadget_t GadgetDB::add(Gadget* gadget, Arch* arch){\n    Expr e, addr;\n    \n    // Add to global list\n    gadget->id = all.size();\n    all.push_back(gadget);\n\n    // Check semantics and classify gadget\n    // 0. First check is special branch gadget such as syscall/int80\n    if( gadget->branch_type == BranchType::SYSCALL ){\n        syscall.add(0, gadget);\n    }else if( gadget->branch_type == BranchType::INT80 ){\n        int80.add(0, gadget);\n    }\n\n    // 1. 
Register semantics\n    for( int reg = 0; reg < gadget->semantics->regs->nb_vars(); reg++){\n        e = gadget->semantics->regs->get(reg);\n\n        // JMP\n        if( reg == arch->pc() && e->is_var()){\n            jmp.add(e->reg(), gadget);\n        }\n        // MOV_CST\n        if( e->is_cst() ){\n            mov_cst.add(make_tuple(reg, e->cst()), gadget);\n        }\n        // MOV_REG\n        else if( e->is_var() && !e->is_reg(reg) ){\n            mov_reg.add(make_tuple(reg, e->reg()), gadget);\n            amov_cst.add(make_tuple(reg, e->reg(), (op_t)Op::ADD, 0), gadget);\n        }\n        // AMOV_CST\n        else if( e->is_binop() && e->args[0]->is_cst() && e->args[1]->is_var() && \n                 op_is_symetric(e->op())){\n            amov_cst.add(make_tuple(reg, e->args[1]->reg(), (op_t)e->op(), e->args[0]->cst()), gadget);\n        }else if( e->is_binop() && e->args[1]->is_cst() && e->args[0]->is_var()){\n            amov_cst.add(make_tuple(reg, e->args[0]->reg(), (op_t)e->op(), e->args[1]->cst()), gadget);\n        } \n        // AMOV_REG\n        else if( e->is_binop() && e->args[0]->is_var() && e->args[1]->is_var()){\n            amov_reg.add(make_tuple(reg, e->args[0]->reg(), (op_t)e->op(), e->args[1]->reg()), gadget);\n            if( op_is_symetric(e->op())){\n                amov_reg.add(make_tuple(reg, e->args[1]->reg(), (op_t)e->op(), e->args[0]->reg()), gadget);\n            }\n        }\n        // LOAD\n        else if( e->is_mem() && e->args[0]->is_var()){\n            load.add(make_tuple(reg, e->args[0]->reg(), 0), gadget);\n        }else if( e->is_mem() && e->args[0]->is_binop(Op::ADD) && e->args[0]->args[0]->is_cst()\n                  && e->args[0]->args[1]->is_var()){\n            load.add(make_tuple(reg, e->args[0]->args[1]->reg(), e->args[0]->args[0]->cst()), gadget);\n        }\n        // ALOAD\n        else if( e->is_binop() && e->args[1]->is_reg(reg) && e->args[0]->is_mem() && \n                 
e->args[0]->args[0]->is_var()){\n            aload.add(make_tuple(reg, (op_t)e->op(), e->args[0]->args[0]->reg(), 0), gadget);\n        }else if( e->is_binop() && e->args[1]->is_reg(reg) &&\n                  e->args[0]->is_mem() && e->args[0]->args[0]->is_binop(Op::ADD) && \n                  e->args[0]->args[0]->args[0]->is_cst() && e->args[0]->args[0]->args[1]->is_var()){\n            aload.add(make_tuple(reg, (op_t)e->op(), e->args[0]->args[0]->args[1]->reg(), \n                                              e->args[0]->args[0]->args[0]->cst()), gadget);\n        }\n    }\n\n    // 2. Memory semantics\n    for( auto write : gadget->semantics->mem->writes ){\n        addr = write.first;\n        e = write.second;\n        // STORE\n        if( addr->is_var() && e->is_var()){\n            store.add(make_tuple(addr->reg(), 0, e->reg()), gadget);\n        }else if( addr->is_binop(Op::ADD) && addr->args[0]->is_cst() && addr->args[1]->is_var()\n                  && e->is_var()){\n            store.add(make_tuple(addr->args[1]->reg(), addr->args[0]->cst(), e->reg()), gadget);\n        \n        }\n        // ASTORE \n        else if( e->is_binop() && e->args[0]->is_mem() && e->args[1]->is_var()\n                 && (addr->eq(e->args[0]->args[0]))){\n            if( addr->is_var() ){\n                astore.add(make_tuple(addr->reg(), 0, (op_t)e->op(), e->args[1]->reg()), gadget);\n            }else if( addr->is_binop(Op::ADD) && addr->args[0]->is_cst() && addr->args[1]->is_var() ){\n                astore.add(make_tuple(addr->args[1]->reg(), addr->args[0]->cst(), (op_t)e->op(), e->args[1]->reg()), gadget);\n            }\n        }\n    }\n    return gadget->id;\n}\n\nint GadgetDB::analyse_raw_gadgets(vector<RawGadget>& raw_gadgets, Arch* arch){\n    unordered_map<string, Gadget*>::iterator git;\n    Gadget* gadget;\n    Semantics* semantics;\n    IRBlock* irblock;\n    SymbolicEngine sym = SymbolicEngine(arch->type);\n    Expr e;\n    int nb_success = 0;\n    \n    
for( auto raw: raw_gadgets ){\n        if( (git = seen.find(raw.raw)) != seen.end()){\n            // Already seen, just add a new address\n            git->second->add_address(raw.addr);\n            nb_success++;\n        }else{\n            // New gadget\n            gadget = new Gadget();\n            // Lift instructions\n            try{\n                if( (irblock = arch->disasm->disasm_block(raw.addr, (code_t)raw.raw.c_str(), raw.raw.size())) == nullptr){\n                    throw runtime_exception(\"disassembler returned null block\");\n                }\n            }catch(std::exception& e){\n                // std::cout << \"DEBUG COULDN'T LIFT GADGET: \" << e.what() << std::endl;\n                    delete gadget; continue;\n            }\n            \n            // Get semantics\n            try{\n                if( (semantics = sym.execute_block(irblock)) == nullptr ){\n                    throw symbolic_exception(\"symbolic engine returned null semantics\");\n                }\n            }catch(symbolic_exception& e){\n                //std::cout << \"DEBUG SYMBOLIC ERROR WHILE EXECUTING GADGET: \" << irblock->name << \" --> \" << e.what() << std::endl;\n                    delete gadget; continue;\n                    delete irblock; irblock = nullptr;\n            }catch(expression_exception& e){\n                //std::cout << \"DEBUG EXPRESSION ERROR WHILE EXECUTING GADGET: \" << irblock->name << \" --> \" << e.what() << std::endl;\n                    delete gadget; continue;\n                    delete irblock; irblock = nullptr;\n            }\n\n            semantics->simplify();\n            gadget->semantics = semantics;\n\n            // Set nb instructions\n            gadget->nb_instr = irblock->_nb_instr;\n            gadget->nb_instr_ir = irblock->_nb_instr_ir;\n\n            // Set dereferenced regs\n            for( int i = 0; i < NB_REGS_MAX; i++){\n                gadget->dereferenced_regs[i] = 
irblock->dereferenced_regs[i];\n            }\n\n            // Get sp increment\n            if( !irblock->known_max_sp_inc ){\n                // std::cout << \"DEBUG ERROR UNKNOWN MAX SP INC \" << irblock->name << std::endl; // Might clobber our ropchain\n                delete gadget; continue;\n            }else{\n                gadget->max_sp_inc = irblock->max_sp_inc;\n            }\n\n            e = semantics->regs->get(arch->sp());\n            if( e->is_binop(Op::ADD) && e->args[0]->is_cst() && e->args[1]->is_reg(arch->sp())){\n                if( e->args[0]->cst() % arch->octets != 0 ){\n                    // std::cout << \"DEBUG ERROR got SP INC Not multiple of arch size: \" << irblock->name << std::endl;\n                    delete gadget; continue;\n                }\n                gadget->sp_inc = e->args[0]->cst();\n            }else if( e->is_binop(Op::ADD) && e->args[0]->is_unop(Op::NEG) &&\n                      e->args[0]->args[0]->is_cst() && e->args[0]->args[0]->cst() % arch->octets == 0 &&\n                      e->args[1]->is_reg(arch->sp())){\n                // sp = sp0 - cst\n                gadget->sp_inc = -1*e->args[0]->args[0]->cst();\n            }else if( e->is_reg(arch->sp()) ){\n                gadget->sp_inc = 0;\n            }\n\n            // Get branch type\n            if( irblock->ends_with_syscall ){\n                gadget->branch_type = BranchType::SYSCALL;\n            }else if( irblock->ends_with_int80 ){\n                gadget->branch_type = BranchType::INT80;\n            }else{\n                // Check last PC value\n                e = semantics->regs->get(arch->pc());\n                if( e->is_var() ){\n                    // Jmp\n                    gadget->branch_type = BranchType::JMP;\n                    gadget->jmp_reg = arch->reg_num(e->name());\n                }else if( e->is_mem()){\n                    // Ret (sp)\n                    if( e->args[0]->is_var() && arch->reg_num(e->args[0]->name()) 
== arch->sp() && gadget->sp_inc == arch->octets ){\n                        gadget->branch_type = BranchType::RET;\n                    }else if( e->args[0]->is_binop(Op::ADD) && e->args[0]->args[0]->is_cst() &&\n                              e->args[0]->args[1]->is_var() && (e->args[0]->args[0]->cst() + arch->octets == gadget->sp_inc) && \n                              arch->reg_num(e->args[0]->args[1]->name()) == arch->sp()){\n                        gadget->branch_type = BranchType::RET;\n                    }else{\n                        // std::cout << \"DEBUG ERROR, NO VALID BRANCH TYPE: \" << irblock->name << std::endl;\n                        delete gadget; continue;\n                    }\n                }else{\n                    // std::cout << \"DEBUG ERROR, NO VALID BRANCH TYPE: \" << irblock->name << std::endl;\n                    delete gadget; continue;\n                }\n            }\n\n            // Set name\n            gadget->asm_str = irblock->name;\n            // Set address\n            gadget->addresses.push_back(raw.addr);\n            // Set modified registers\n            for( int r = 0; r < arch->nb_regs; r++){\n                if( !semantics->regs->get(r)->is_var() ||\n                        semantics->regs->get(r)->name() != arch->reg_name(r)){\n                    gadget->modified_regs[r] = true;\n                }\n            }\n\n            // Delete irblock (we don't need it anymore, only semantics)\n            delete irblock; irblock = nullptr;\n\n            // Classify gadget in db\n            seen[raw.raw] = gadget;\n            add(gadget, arch);\n            nb_success++;\n        }\n    }\n\n    return nb_success;\n}\n\nGadget* GadgetDB::get(gadget_t gadget_num){\n    if( gadget_num >= all.size())\n        throw runtime_exception(\"GadgetDB::get() got invalid gadget number\");\n    return all[gadget_num];\n}\n\nconst vector<Gadget*>& GadgetDB::get_mov_cst(reg_t reg, cst_t cst){\n    return 
mov_cst.get(make_tuple(reg, cst));\n}\n\nconst vector<Gadget*>& GadgetDB::get_mov_reg(reg_t dst_reg, reg_t src_reg){\n    return mov_reg.get(make_tuple(dst_reg, src_reg));\n}\n\nconst vector<Gadget*>& GadgetDB::get_amov_cst(reg_t dst_reg, reg_t src_reg, Op op, cst_t src_cst){\n    return amov_cst.get(make_tuple(dst_reg, src_reg, (op_t)op, src_cst));\n}\n\nconst vector<Gadget*>& GadgetDB::get_amov_reg(reg_t dst_reg, reg_t src_reg1, Op op, reg_t src_reg2){\n    return amov_reg.get(make_tuple(dst_reg, src_reg1, (op_t)op, src_reg2));\n}\n\nconst vector<Gadget*>& GadgetDB::get_load(reg_t dst_reg, reg_t addr_reg, cst_t offset){\n    return load.get(make_tuple(dst_reg, addr_reg, offset));\n}\n\nconst vector<Gadget*>& GadgetDB::get_aload(reg_t dst_reg, Op op, reg_t addr_reg, cst_t offset){\n    return aload.get(make_tuple(dst_reg, (op_t)op, addr_reg, offset));\n}\n\nconst vector<Gadget*>& GadgetDB::get_jmp(reg_t jmp_reg){\n    return jmp.get(jmp_reg);\n}\n\nconst vector<Gadget*>& GadgetDB::get_store(reg_t addr_reg, cst_t offset, reg_t src_reg){\n    return store.get(make_tuple(addr_reg, offset, src_reg));\n}\n\nconst vector<Gadget*>& GadgetDB::get_astore(reg_t addr_reg, cst_t offset, Op op, reg_t src_reg){\n    return astore.get(make_tuple(addr_reg, offset, (op_t)op, src_reg));\n}\n\nconst vector<Gadget*>& GadgetDB::get_int80(){\n    return int80.get(0); // Use dummy key 0\n}\n\nconst vector<Gadget*>& GadgetDB::get_syscall(){\n    return syscall.get(0); // Use dummy key 0\n}\n\n/* ============== Get possible gadgets ===================== */\n\nPossibleGadgets* GadgetDB::get_possible_mov_reg(reg_t dst_reg, reg_t src_reg, bool* param_is_free){\n    return mov_reg.get_possible(make_tuple(dst_reg, src_reg), param_is_free, 2);\n}\n\nPossibleGadgets* GadgetDB::get_possible_amov_reg(reg_t dst_reg, reg_t src_reg1, Op src_op, reg_t src_reg2, bool* param_is_free){\n    return amov_reg.get_possible(make_tuple(dst_reg, src_reg1, (op_t)src_op, src_reg2), param_is_free, 
4);\n}\n\nPossibleGadgets* GadgetDB::get_possible_mov_cst(reg_t dst_reg, cst_t src_cst, bool* param_is_free){\n    return mov_cst.get_possible(make_tuple(dst_reg, src_cst), param_is_free, 2);\n}\n\nPossibleGadgets* GadgetDB::get_possible_amov_cst(reg_t dst_reg, reg_t src_reg, Op src_op, cst_t src_cst, bool* param_is_free){\n    return amov_cst.get_possible(make_tuple(dst_reg, src_reg, (op_t)src_op, src_cst), param_is_free, 4);\n}\n\nPossibleGadgets* GadgetDB::get_possible_load(reg_t dst_reg, reg_t src_addr_reg, cst_t src_addr_offset, bool* param_is_free){\n    return load.get_possible(make_tuple(dst_reg, src_addr_reg, src_addr_offset), param_is_free, 3);\n}\n\nPossibleGadgets* GadgetDB::get_possible_aload(reg_t dst_reg, Op op, reg_t src_addr_reg, cst_t src_addr_offset, bool* param_is_free){\n    return aload.get_possible(make_tuple(dst_reg, (op_t)op, src_addr_reg, src_addr_offset), param_is_free, 4);\n}\n\nPossibleGadgets* GadgetDB::get_possible_store(reg_t dst_addr_reg, cst_t dst_addr_cst, reg_t src_reg, bool* param_is_free){\n    return store.get_possible(make_tuple(dst_addr_reg, dst_addr_cst, src_reg), param_is_free, 3);\n}\n\nPossibleGadgets* GadgetDB::get_possible_astore(reg_t dst_addr_reg, cst_t dst_addr_cst, Op op, reg_t src_reg, bool* param_is_free){\n    return astore.get_possible(make_tuple(dst_addr_reg, dst_addr_cst, (op_t)op, src_reg), param_is_free, 3);\n}\n\nvoid GadgetDB::clear(){\n    mov_cst.clear();\n    amov_cst.clear();\n    mov_reg.clear();\n    amov_reg.clear();\n    load.clear();\n    aload.clear();\n    store.clear();\n    astore.clear();\n    for( auto g : all ){\n        delete g;\n    }\n    all.clear();\n    seen.clear();\n}\n\nGadgetDB::~GadgetDB(){\n    // Delete all gadgets\n    clear();\n}\n"
  },
  {
    "path": "libropium/dependencies/murmur3/murmur3.c",
    "content": "//-----------------------------------------------------------------------------\n// MurmurHash3 was written by Austin Appleby, and is placed in the public\n// domain. The author hereby disclaims copyright to this source code.\n\n// Note - The x86 and x64 versions do _not_ produce the same results, as the\n// algorithms are optimized for their respective platforms. You can still\n// compile and run any of them on any platform, but your performance with the\n// non-native version will be less than optimal.\n\n#include \"murmur3.h\"\n\n//-----------------------------------------------------------------------------\n// Platform-specific functions and macros\n\n#ifdef __GNUC__\n#define FORCE_INLINE __attribute__((always_inline)) inline\n#else\n#define FORCE_INLINE inline\n#endif\n\nstatic FORCE_INLINE uint32_t rotl32 ( uint32_t x, int8_t r )\n{\n  return (x << r) | (x >> (32 - r));\n}\n\nstatic FORCE_INLINE uint64_t rotl64 ( uint64_t x, int8_t r )\n{\n  return (x << r) | (x >> (64 - r));\n}\n\n#define\tROTL32(x,y)\trotl32(x,y)\n#define ROTL64(x,y)\trotl64(x,y)\n\n#define BIG_CONSTANT(x) (x##LLU)\n\n//-----------------------------------------------------------------------------\n// Block read - if your platform needs to do endian-swapping or can only\n// handle aligned reads, do the conversion here\n\n#define getblock(p, i) (p[i])\n\n//-----------------------------------------------------------------------------\n// Finalization mix - force all bits of a hash block to avalanche\n\nstatic FORCE_INLINE uint32_t fmix32 ( uint32_t h )\n{\n  h ^= h >> 16;\n  h *= 0x85ebca6b;\n  h ^= h >> 13;\n  h *= 0xc2b2ae35;\n  h ^= h >> 16;\n\n  return h;\n}\n\n//----------\n\nstatic FORCE_INLINE uint64_t fmix64 ( uint64_t k )\n{\n  k ^= k >> 33;\n  k *= BIG_CONSTANT(0xff51afd7ed558ccd);\n  k ^= k >> 33;\n  k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);\n  k ^= k >> 33;\n\n  return k;\n}\n\n//-----------------------------------------------------------------------------\n\nvoid 
MurmurHash3_x86_32 ( const void * key, int len,\n                          uint32_t seed, void * out )\n{\n  const uint8_t * data = (const uint8_t*)key;\n  const int nblocks = len / 4;\n  int i;\n\n  uint32_t h1 = seed;\n\n  uint32_t c1 = 0xcc9e2d51;\n  uint32_t c2 = 0x1b873593;\n\n  //----------\n  // body\n\n  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);\n\n  for(i = -nblocks; i; i++)\n  {\n    uint32_t k1 = getblock(blocks,i);\n\n    k1 *= c1;\n    k1 = ROTL32(k1,15);\n    k1 *= c2;\n    \n    h1 ^= k1;\n    h1 = ROTL32(h1,13); \n    h1 = h1*5+0xe6546b64;\n  }\n\n  //----------\n  // tail\n\n  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);\n\n  uint32_t k1 = 0;\n\n  switch(len & 3)\n  {\n  case 3: k1 ^= tail[2] << 16;\n  case 2: k1 ^= tail[1] << 8;\n  case 1: k1 ^= tail[0];\n          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;\n  };\n\n  //----------\n  // finalization\n\n  h1 ^= len;\n\n  h1 = fmix32(h1);\n\n  *(uint32_t*)out = h1;\n} \n\n//-----------------------------------------------------------------------------\n\nvoid MurmurHash3_x86_128 ( const void * key, const int len,\n                           uint32_t seed, void * out )\n{\n  const uint8_t * data = (const uint8_t*)key;\n  const int nblocks = len / 16;\n  int i;\n\n  uint32_t h1 = seed;\n  uint32_t h2 = seed;\n  uint32_t h3 = seed;\n  uint32_t h4 = seed;\n\n  uint32_t c1 = 0x239b961b; \n  uint32_t c2 = 0xab0e9789;\n  uint32_t c3 = 0x38b34ae5; \n  uint32_t c4 = 0xa1e38b93;\n\n  //----------\n  // body\n\n  const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);\n\n  for(i = -nblocks; i; i++)\n  {\n    uint32_t k1 = getblock(blocks,i*4+0);\n    uint32_t k2 = getblock(blocks,i*4+1);\n    uint32_t k3 = getblock(blocks,i*4+2);\n    uint32_t k4 = getblock(blocks,i*4+3);\n\n    k1 *= c1; k1  = ROTL32(k1,15); k1 *= c2; h1 ^= k1;\n\n    h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;\n\n    k2 *= c2; k2  = ROTL32(k2,16); k2 *= c3; h2 ^= k2;\n\n    h2 = 
ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;\n\n    k3 *= c3; k3  = ROTL32(k3,17); k3 *= c4; h3 ^= k3;\n\n    h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;\n\n    k4 *= c4; k4  = ROTL32(k4,18); k4 *= c1; h4 ^= k4;\n\n    h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;\n  }\n\n  //----------\n  // tail\n\n  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);\n\n  uint32_t k1 = 0;\n  uint32_t k2 = 0;\n  uint32_t k3 = 0;\n  uint32_t k4 = 0;\n\n  switch(len & 15)\n  {\n  case 15: k4 ^= tail[14] << 16;\n  case 14: k4 ^= tail[13] << 8;\n  case 13: k4 ^= tail[12] << 0;\n           k4 *= c4; k4  = ROTL32(k4,18); k4 *= c1; h4 ^= k4;\n\n  case 12: k3 ^= tail[11] << 24;\n  case 11: k3 ^= tail[10] << 16;\n  case 10: k3 ^= tail[ 9] << 8;\n  case  9: k3 ^= tail[ 8] << 0;\n           k3 *= c3; k3  = ROTL32(k3,17); k3 *= c4; h3 ^= k3;\n\n  case  8: k2 ^= tail[ 7] << 24;\n  case  7: k2 ^= tail[ 6] << 16;\n  case  6: k2 ^= tail[ 5] << 8;\n  case  5: k2 ^= tail[ 4] << 0;\n           k2 *= c2; k2  = ROTL32(k2,16); k2 *= c3; h2 ^= k2;\n\n  case  4: k1 ^= tail[ 3] << 24;\n  case  3: k1 ^= tail[ 2] << 16;\n  case  2: k1 ^= tail[ 1] << 8;\n  case  1: k1 ^= tail[ 0] << 0;\n           k1 *= c1; k1  = ROTL32(k1,15); k1 *= c2; h1 ^= k1;\n  };\n\n  //----------\n  // finalization\n\n  h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;\n\n  h1 += h2; h1 += h3; h1 += h4;\n  h2 += h1; h3 += h1; h4 += h1;\n\n  h1 = fmix32(h1);\n  h2 = fmix32(h2);\n  h3 = fmix32(h3);\n  h4 = fmix32(h4);\n\n  h1 += h2; h1 += h3; h1 += h4;\n  h2 += h1; h3 += h1; h4 += h1;\n\n  ((uint32_t*)out)[0] = h1;\n  ((uint32_t*)out)[1] = h2;\n  ((uint32_t*)out)[2] = h3;\n  ((uint32_t*)out)[3] = h4;\n}\n\n//-----------------------------------------------------------------------------\n\nvoid MurmurHash3_x64_128 ( const void * key, const int len,\n                           const uint32_t seed, void * out )\n{\n  const uint8_t * data = (const uint8_t*)key;\n  const int nblocks = len / 16;\n  int i;\n\n  uint64_t h1 = 
seed;\n  uint64_t h2 = seed;\n\n  uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);\n  uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);\n\n  //----------\n  // body\n\n  const uint64_t * blocks = (const uint64_t *)(data);\n\n  for(i = 0; i < nblocks; i++)\n  {\n    uint64_t k1 = getblock(blocks,i*2+0);\n    uint64_t k2 = getblock(blocks,i*2+1);\n\n    k1 *= c1; k1  = ROTL64(k1,31); k1 *= c2; h1 ^= k1;\n\n    h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;\n\n    k2 *= c2; k2  = ROTL64(k2,33); k2 *= c1; h2 ^= k2;\n\n    h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;\n  }\n\n  //----------\n  // tail\n\n  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);\n\n  uint64_t k1 = 0;\n  uint64_t k2 = 0;\n\n  switch(len & 15)\n  {\n  case 15: k2 ^= (uint64_t)(tail[14]) << 48;\n  case 14: k2 ^= (uint64_t)(tail[13]) << 40;\n  case 13: k2 ^= (uint64_t)(tail[12]) << 32;\n  case 12: k2 ^= (uint64_t)(tail[11]) << 24;\n  case 11: k2 ^= (uint64_t)(tail[10]) << 16;\n  case 10: k2 ^= (uint64_t)(tail[ 9]) << 8;\n  case  9: k2 ^= (uint64_t)(tail[ 8]) << 0;\n           k2 *= c2; k2  = ROTL64(k2,33); k2 *= c1; h2 ^= k2;\n\n  case  8: k1 ^= (uint64_t)(tail[ 7]) << 56;\n  case  7: k1 ^= (uint64_t)(tail[ 6]) << 48;\n  case  6: k1 ^= (uint64_t)(tail[ 5]) << 40;\n  case  5: k1 ^= (uint64_t)(tail[ 4]) << 32;\n  case  4: k1 ^= (uint64_t)(tail[ 3]) << 24;\n  case  3: k1 ^= (uint64_t)(tail[ 2]) << 16;\n  case  2: k1 ^= (uint64_t)(tail[ 1]) << 8;\n  case  1: k1 ^= (uint64_t)(tail[ 0]) << 0;\n           k1 *= c1; k1  = ROTL64(k1,31); k1 *= c2; h1 ^= k1;\n  };\n\n  //----------\n  // finalization\n\n  h1 ^= len; h2 ^= len;\n\n  h1 += h2;\n  h2 += h1;\n\n  h1 = fmix64(h1);\n  h2 = fmix64(h2);\n\n  h1 += h2;\n  h2 += h1;\n\n  ((uint64_t*)out)[0] = h1;\n  ((uint64_t*)out)[1] = h2;\n}\n\n//-----------------------------------------------------------------------------\n\n"
  },
  {
    "path": "libropium/dependencies/murmur3/murmur3.h",
    "content": "//-----------------------------------------------------------------------------\n// MurmurHash3 was written by Austin Appleby, and is placed in the\n// public domain. The author hereby disclaims copyright to this source\n// code.\n\n#ifndef _MURMURHASH3_H_\n#define _MURMURHASH3_H_\n\n#include <stdint.h>\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n//-----------------------------------------------------------------------------\n\nvoid MurmurHash3_x86_32 (const void *key, int len, uint32_t seed, void *out);\n\nvoid MurmurHash3_x86_128(const void *key, int len, uint32_t seed, void *out);\n\nvoid MurmurHash3_x64_128(const void *key, int len, uint32_t seed, void *out);\n\n//-----------------------------------------------------------------------------\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif // _MURMURHASH3_H_\n"
  },
  {
    "path": "libropium/include/arch.hpp",
    "content": "#ifndef ARCH_H\n#define ARCH_H\n\n#include <cstdint>\n#include <string>\n#include <capstone/capstone.h>\n\nusing std::string;\nusing std::to_string;\n\n/* Forward declarations */\nclass Disassembler;\n\n/* Type aliasing */\ntypedef uint16_t reg_t; \n\n#define NB_REGS_MAX 64\n\n/* CPU modes */\nenum class CPUMode{\n    X86,\n    X64,\n    ARM32,\n    ARM_THUMB,\n    ARM64,\n    NONE\n};\n\n/* Different architectures supported */\nenum class ArchType{\n    X86,\n    X64,\n    ARM32,\n    ARM64,\n    NONE\n};\n\n#include \"disassembler.hpp\"\n\n/* Arch\n   ====\n\nThe Arch object represents an architecture. It holds information such as\nthe size of the registers, the number of registers, and the mode for \narchitectures that can have several modes (X86, ARM, ...). It also holds\na pointer to its corresponding Disassembler.\n\nRegisters are represented as integers under the type reg_t. \n\nThe Arch class is a Base class. Each architecture must the have its own\nchild class, such as ArchX86, ArchX64, etc.\n\n*/\n\nclass Arch{\npublic:\n    ArchType type;\n    Disassembler* disasm;\n    int bits;\n    int octets;\n    int nb_regs;\n    CPUMode mode;\n\n    Arch(ArchType type, int bits, int octets, int nb_regs, CPUMode mode, Disassembler * disasm);\n    virtual ~Arch();\n    virtual string reg_name(reg_t num) = 0;\n    virtual reg_t reg_num(string name) = 0;\n    virtual bool is_valid_reg(string& name) = 0;\n    virtual reg_t sp() = 0; // Stack pointer\n    virtual reg_t pc() = 0; // Program counter\n    virtual reg_t tsc() = 0; // Timestamp counter\n};\n\nclass ArchNone: public Arch{\npublic:\n    ArchNone(): Arch(ArchType::NONE, 32, 4, 20, CPUMode::NONE, (Disassembler*)nullptr){};\n    string reg_name(reg_t num){return \"reg\" + to_string(num);};\n    reg_t reg_num(string name){return 0;};\n    bool is_valid_reg(string& name){return false;};\n    reg_t sp(){return 19;};\n    reg_t pc(){return 18;};\n    reg_t tsc(){return 17;};\n};\n\n\n/* 
==================================================\n *                      Arch X86\n * ================================================= */\n \n/* Registers */\n#define X86_EAX 0\n#define X86_EBX 1\n#define X86_ECX 2\n#define X86_EDX 3\n#define X86_EDI 4\n#define X86_ESI 5\n#define X86_EBP 6\n#define X86_ESP 7\n#define X86_EIP 8\n/* Segment Registers */\n#define X86_CS 9\n#define X86_DS 10\n#define X86_ES 11\n#define X86_FS 12\n#define X86_GS 13\n#define X86_SS 14\n/* Flag Registers */\n#define X86_CF 15 // Carry flag\n#define X86_PF 16 // Parity flag\n#define X86_AF 17 // Auxiliary carry flag\n#define X86_ZF 18 // Zero flag\n#define X86_SF 19 // Sign flag\n#define X86_TF 20 // Trap flag\n#define X86_IF 21 // Interrupt enable flag\n#define X86_DF 22 // Direction flag\n#define X86_OF 23 // Overflow flag\n#define X86_IOPL 24 // I/O Privilege level\n#define X86_NT 25 // Nested task flag\n#define X86_RF 26 // Resume flag\n#define X86_VM 27 // Virtual 8086 mode flag\n#define X86_AC 28 // Alignment check flag (486+)\n#define X86_VIF 29 // Virtual interrupt flag\n#define X86_VIP 30 // Virtual interrupt pending flag\n#define X86_ID 31 // ID Flag\n#define X86_TSC 32 // Timestamp counter\n#define X86_NB_REGS 33\n\nclass ArchX86: public Arch{\npublic:\n    ArchX86();\n    string reg_name(reg_t num);\n    reg_t reg_num(string name);\n    bool is_valid_reg(string& name);\n    reg_t sp();\n    reg_t pc();\n    reg_t tsc();\n};\n\n\n/* ==================================================\n *                      Arch X64\n * ================================================= */\n\n/* Registers */\n#define X64_RAX 0\n#define X64_RBX 1\n#define X64_RCX 2\n#define X64_RDX 3\n#define X64_RDI 4\n#define X64_RSI 5\n#define X64_RBP 6\n#define X64_RSP 7\n#define X64_RIP 8\n#define X64_R8 9\n#define X64_R9 10\n#define X64_R10 11\n#define X64_R11 12\n#define X64_R12 13\n#define X64_R13 14\n#define X64_R14 15\n#define X64_R15 16\n/* Segment Registers */\n#define X64_CS 17\n#define X64_DS 
18\n#define X64_ES 19\n#define X64_FS 20\n#define X64_GS 21\n#define X64_SS 22\n/* Flag Registers */\n#define X64_CF 23 // Carry flag\n#define X64_PF 24 // Parity flag\n#define X64_AF 25 // Auxiliary carry flag\n#define X64_ZF 26 // Zero flag\n#define X64_SF 27 // Sign flag\n#define X64_TF 28 // Trap flag\n#define X64_IF 29 // Interrupt enable flag\n#define X64_DF 30 // Direction flag\n#define X64_OF 31 // Overflow flag\n#define X64_IOPL 32 // I/O Privilege level\n#define X64_NT 33 // Nested task flag\n#define X64_RF 34 // Resume flag\n#define X64_VM 35 // Virtual 8086 mode flag\n#define X64_AC 36 // Alignment check flag (486+)\n#define X64_VIF 37 // Virtual interrupt flag\n#define X64_VIP 38 // Virtual interrupt pending flag\n#define X64_ID 39 // ID Flag\n#define X64_TSC 40 // Timestamp counter\n#define X64_NB_REGS 41\n\n\nclass ArchX64: public Arch{\npublic:\n    ArchX64();\n    string reg_name(reg_t num);\n    reg_t reg_num(string name);\n    bool is_valid_reg(string& name);\n    reg_t sp();\n    reg_t pc();\n    reg_t tsc();\n};\n\n#endif\n"
  },
  {
    "path": "libropium/include/assertion.hpp",
    "content": "#ifndef ASSERTION_H\n#define ASSERTION_H\n\n#include \"ropchain.hpp\"\n#include \"arch.hpp\"\n#include <algorithm>\n\nclass ValidPointers{\n    vector<int> _regs;\npublic:\n    void add_valid_pointer(int reg);\n    bool is_valid_pointer(int reg);\n    void clear();\n};\n\n\nclass Assertion{\npublic:\n    ValidPointers valid_pointers;\n    void clear();\n};\n\n#endif\n"
  },
  {
    "path": "libropium/include/compiler.hpp",
    "content": "#ifndef ROP_COMPILER_H\n#define ROP_COMPILER_H\n\n#include \"strategy.hpp\"\n#include \"il.hpp\"\n#include \"database.hpp\"\n#include \"systems.hpp\"\n#include <list>\n\nusing std::list;\n\n\nenum class ABI{\n    /* X86 */\n    X86_CDECL,\n    X86_STDCALL,\n    X86_FASTCALL,\n    X86_THISCALL_GCC,\n    X86_THISCALL_MS,\n    X86_LINUX_SYSENTER,\n    X86_LINUX_INT80,\n    /* X64 */\n    X64_MS,\n    X64_SYSTEM_V,\n    /* No specific ABI */\n    NONE\n};\n\n/* CompilerTask\n   ============\n\n   A compiler task is basically a set of StrategyGraph. For each graph,\n   it tries to find a valid gadget selection. \n\n   - If it succeeds, the corresponding ROPChain is returned.\n   - If it fails, it applies strategy rules to the graph to create new graphs,\n     adds the new graphs to the queue of pending graphs, and then tries the \n     next one\n\n*/\n\nclass CompilerTask{\n    void apply_rules_to_graph(StrategyGraph* graph, int max_tries);\n    Arch * arch;\npublic:\n    CompilerTask(Arch* arch);\n    vector<StrategyGraph*> pending_strategies;\n    void add_strategy(StrategyGraph* graph, int max_tries);\n    ROPChain* compile(Arch* arch, GadgetDB* db, Constraint* constraint=nullptr, int nb_tries=3000);\n    void clear();\n    ~CompilerTask();\n};\n\n/* ROPCompiler\n   ============\n\n   A ROP compiler is an abstraction over IL and StrategyGraph functionnalities.\n   Basically it takes an IL program, parses it, translates it into strategy \n   graphs, and then start a compiler task to try to satisfy the program and\n   find a matching ROPChain.\n\n*/\n\nclass ROPCompiler{\n    ROPChain* _set_registers_permutation( vector<ILInstruction>& instr, vector<int>& permutation, \n        Constraint* constraint, list<vector<int>>& failed_perms, bool& failed_on_first);\n    bool is_complex_instr(ILInstruction& instr, ABI abi);\npublic:\n    Arch* arch;\n    GadgetDB* db;\n    // Translate function calls into strategy graphs\n    bool 
_x86_cdecl_to_strategy(StrategyGraph& graph, ILInstruction& instr);\n    bool _x86_stdcall_to_strategy(StrategyGraph& graph, ILInstruction& instr);\n\n    // Compile wrappers for functions and syscalls that set multiple registers\n    ROPChain* _set_multiple_registers(vector<ILInstruction>& instr, Constraint* constraint);\n    ROPChain* _compile_x86_linux_syscall(ILInstruction& instr, Constraint* constraint);\n    ROPChain* _compile_x64_linux_syscall(ILInstruction& instr, Constraint* constraint);\n    ROPChain* _compile_x64_system_v_call(ILInstruction& instr, Constraint* constraint);\n    ROPChain* _compile_x64_ms_call(ILInstruction& instr, Constraint* constraint);\n\n    ROPCompiler( Arch* arch, GadgetDB* db);\n\n    // Main API\n    // Take a list of instructions and compile all of them sequentially into a ropchain\n    ROPChain* process_simple(vector<ILInstruction>& instructions, Constraint* constraint=nullptr, ABI abi = ABI::NONE, System sys=System::NONE);\n    ROPChain* process_complex(vector<ILInstruction>& instructions, Constraint* constraint=nullptr, ABI abi = ABI::NONE, System sys=System::NONE);\n    // Transform complex instructions into simpler instructions that can be handled by \"process()\"\n    bool preprocess(vector<ILInstruction>& dst, vector<ILInstruction>& src, Constraint* constraint=nullptr);\n    // Parse a program into a vector of instructions\n    vector<ILInstruction> parse(string& program);\n    // Translate an IL instruction into one or several strategy graphs\n    void il_to_strategy(vector<StrategyGraph*>& graphs, ILInstruction& instr, Constraint* constraint = nullptr, ABI abi = ABI::NONE, System sys=System::NONE);\n    // Parse and process a program\n    ROPChain* compile(string program, Constraint* constraint=nullptr, ABI abi=ABI::NONE, System sys=System::NONE);\n};\n\n#endif\n"
  },
  {
    "path": "libropium/include/constraint.hpp",
    "content": "#ifndef CONSTRAINT_H\n#define CONSTRAINT_H\n\n#include \"ropchain.hpp\"\n#include \"arch.hpp\"\n#include \"assertion.hpp\"\n#include <algorithm>\n\nclass BadBytes{\n    vector<unsigned char> _bad_bytes;\npublic:\n    void add_bad_byte(unsigned char byte);\n    void clear();\n    bool is_valid_byte(unsigned char byte);\n    unsigned char get_valid_byte();\n    bool is_valid_address(addr_t addr, int arch_bytes);\n    addr_t get_valid_padding(int arch_bytes);\n    addr_t get_valid_address(Gadget* gadget, int nb_bytes);\n    bool check(Gadget* gadget, int arch_bytes);\n};\n\nclass KeepRegs{\n    vector<int> _keep;\npublic:\n    void add_keep_reg(int reg_num);\n    vector<int>& regs_to_keep();\n    bool is_kept(int reg_num);\n    void clear();\n    bool check(Gadget* gadget);\n};\n\nclass MemSafety{\n    bool _force_safe;\n    bool _safe_reg_pointers[NB_REGS_MAX]; // Registers that should be considered valid pointers\npublic:\n    MemSafety();\n    void force_safe();\n    void enable_unsafe();\n    bool is_enforced();\n    void add_safe_reg(int reg_num);\n    void clear();\n    bool check(Gadget* gadget, int arch_nb_regs, Assertion* assertion=nullptr);\n};\n\nclass Constraint{\npublic:\n    BadBytes bad_bytes;\n    KeepRegs keep_regs;\n    MemSafety mem_safety;\n\n    void clear();\n    bool check(Gadget* gadget, Arch* arch, Assertion* assertion = nullptr);\n};\n\n#endif\n"
  },
  {
    "path": "libropium/include/database.hpp",
    "content": "#ifndef DATABASE_H\n#define DATABASE_H\n\n#include <iostream>\n#include <tuple>\n#include <unordered_map>\n#include \"utils.hpp\"\n#include \"expression.hpp\"\n#include \"ropchain.hpp\"\n#include \"arch.hpp\"\n\nusing std::pair;\nusing std::tuple;\nusing std::make_tuple;\nusing std::unordered_map;\n\ntypedef int gadget_t; \n\n#define NO_GADGET -1\n\n#define DB_MAX_REGS 64\n\n// Types of gadgets supported in database\nenum class GadgetType{\n    NOP,\n    // Register to register\n    MOV_CST,    // reg <- cst\n    MOV_REG,    // reg <- reg\n    AMOV_CST,   // reg <- reg OP cst\n    AMOV_REG,   // reg <- reg OP reg\n    // Read from memory\n    LOAD,       // reg <- mem(reg + offset)\n    ALOAD,      // reg OP<- mem(reg + offset)\n    // Store to memory\n    STORE,      // mem(reg + offset) <- reg\n    ASTORE,     // mem(reg + offset) OP<- reg\n    // jump\n    JMP,     // PC <- reg\n    // Syscalls\n    SYSCALL,\n    INT80\n};\n\n/* PossibleGadgets\n   ===============\n    This class holds query results to the database where some parameters\n    are 'free' (ie eax = ebx + X where X is not fixed)\n*/\n\nclass PossibleGadgets{\npublic:\n    vector<pair<vector<cst_t>, vector<Gadget*>*>> gadgets; // Pointers to vector<Gadget*> are not owned! 
\n    int size(){return gadgets.size();};\n    vector<Gadget*>& get_gadgets(int i){return *(gadgets[i].second);};\n    cst_t get_param(int i, int p){return gadgets[i].first[p];};\n    PossibleGadgets(){};\n    PossibleGadgets(const PossibleGadgets& other){\n        gadgets = std::move(other.gadgets);\n    };\n};\n\n\n// Generic database for different kinds of gadgets\nint find_insert_index(vector<Gadget*>& gadget_list, Gadget* gadget);\nint find_insert_index_possible_gadgets(PossibleGadgets* possible, Gadget* gadget);\n\ntemplate<class K> \nclass BaseDB{\npublic:\n    unordered_map<K, vector<Gadget*>> db;\n\n    // Template methods\n    void add(K key, Gadget* gadget){\n        vector<Gadget*>::iterator it;\n        int index;\n        if( db.count(key) > 0 ){\n            index = find_insert_index(db[key], gadget);\n            db[key].insert(db[key].begin()+index, gadget);\n        }else{\n            db[key] = vector<Gadget*>{gadget};\n        }\n    }\n\n    const vector<Gadget*>& get(K key){\n        typename unordered_map<K, vector<Gadget*>>::iterator it;\n        if( (it = db.find(key)) == db.end()){\n            db[key] = vector<Gadget*>{};\n            return db[key];\n        }else{\n            return it->second;\n        }\n    }\n    \n    bool _check_key_match(const K& key1, const K& key2, bool* param_is_free, int nb_params){\n        auto a1 = tuple_to_array(key1);\n        auto a2 = tuple_to_array(key2);\n        for( int p = 0; p < a1.size(); p++){\n            if( !param_is_free[p] && !(a1[p] == a2[p]))\n                return false;\n        }\n        return true;\n    }\n\n    PossibleGadgets* get_possible(K key, bool* param_is_free, int nb_params){\n        PossibleGadgets* res = new PossibleGadgets();\n        int index;\n        for( auto& it : db ){\n            // Check if key matches\n            if( !it.second.empty() && _check_key_match(key, it.first, param_is_free, nb_params)){\n                vector<cst_t> vec = 
tuple_to_vector(it.first);\n                index = find_insert_index_possible_gadgets(res, it.second[0]);\n                res->gadgets.insert(res->gadgets.begin()+index, std::make_pair(vec, &(it.second)));\n            }\n        }\n        return res;\n    }\n    \n    void clear(){\n        db.clear();\n    }\n};\n\n// Big gadget database\n\ntypedef int op_t;\nclass GadgetDB{\n    // Map of raw gadget strings that have already been analysed \n    unordered_map<string, Gadget*> seen;\npublic:\n    // Global gadgets list (gadgets are owned)\n    vector<Gadget*> all;\n    // Databases for all different gadget types\n    BaseDB<tuple<reg_t, cst_t>> mov_cst;\n    BaseDB<tuple<reg_t, reg_t>> mov_reg;\n    BaseDB<tuple<reg_t, reg_t, op_t, cst_t>> amov_cst;\n    BaseDB<tuple<reg_t, reg_t, op_t, reg_t>> amov_reg;\n    BaseDB<tuple<reg_t, reg_t, addr_t>> load;\n    BaseDB<tuple<reg_t, op_t, reg_t, addr_t>> aload;\n    BaseDB<tuple<reg_t, addr_t, reg_t>> store;\n    BaseDB<tuple<reg_t, addr_t, op_t, reg_t>> astore;\n    BaseDB<reg_t> jmp;\n    BaseDB<int> syscall; // <int> key is always 0\n    BaseDB<int> int80; // <int> key is always 0\n\n    // Add and classify a gadget in the database\n    gadget_t add(Gadget* gadget, Arch* arch);\n    // Analyse raw gadgets and fill the database accordingly\n    // return the number of successfully analysed gadgets\n    int analyse_raw_gadgets(vector<RawGadget>& raw_gadgets, Arch* arch);\n    // Get a gadget by id\n    Gadget* get(gadget_t gadget_num);\n    \n    // Get gadgets semantically\n    const vector<Gadget*>& get_mov_cst(reg_t reg, cst_t cst);\n    const vector<Gadget*>& get_mov_reg(reg_t dst_reg, reg_t src_reg);\n    const vector<Gadget*>& get_amov_cst(reg_t dst_reg, reg_t src_reg, Op op, cst_t src_cst);\n    const vector<Gadget*>& get_amov_reg(reg_t dst_reg, reg_t src_reg1, Op op, reg_t src_reg2);\n    const vector<Gadget*>& get_load(reg_t dst_reg, reg_t addr_reg, cst_t offset);\n    const vector<Gadget*>& get_aload(reg_t 
dst_reg, Op op, reg_t addr_reg, cst_t offset);\n    const vector<Gadget*>& get_jmp(reg_t jmp_reg);\n    const vector<Gadget*>& get_store(reg_t addr_reg, cst_t offset, reg_t src_reg);\n    const vector<Gadget*>& get_astore(reg_t addr_reg, cst_t offset, Op op, reg_t src_reg);\n    const vector<Gadget*>& get_syscall();\n    const vector<Gadget*>& get_int80();\n\n    // Get gadgets with optional parameters\n    PossibleGadgets* get_possible_mov_cst(reg_t reg, cst_t cst, bool* param_is_free);\n    PossibleGadgets* get_possible_mov_reg(reg_t dst_reg, reg_t src_reg, bool* param_is_free);\n    PossibleGadgets* get_possible_amov_cst(reg_t dst_reg, reg_t src_reg, Op op, cst_t src_cst, bool* param_is_free);\n    PossibleGadgets* get_possible_amov_reg(reg_t dst_reg, reg_t src_reg1, Op op, reg_t src_reg2, bool* param_is_free);\n    PossibleGadgets* get_possible_load(reg_t dst_reg, reg_t addr_reg, cst_t offset, bool* param_is_free);\n    PossibleGadgets* get_possible_aload(reg_t dst_reg, Op op, reg_t addr_reg, cst_t offset, bool* param_is_free);\n    PossibleGadgets* get_possible_jmp(reg_t jmp_reg, bool* param_is_free);\n    PossibleGadgets* get_possible_store(reg_t addr_reg, cst_t offset, reg_t src_reg, bool* param_is_free);\n    PossibleGadgets* get_possible_astore(reg_t addr_reg, cst_t offset, Op op, reg_t src_reg, bool* param_is_free);\n\n    // Clear\n    void clear();\n\n    // Destructor\n    ~GadgetDB();\n};\n\n#endif\n"
  },
  {
    "path": "libropium/include/disassembler.hpp",
    "content": "#ifndef DISASSEMBLER_H\n#define DISASSEMBLER_H\n\n#include \"arch.hpp\"\n#include \"ir.hpp\"\n#include <cstdint>\n\n/* Types aliasing */\ntypedef uint8_t* code_t;\n\n/* Forward declaration */\nenum class CPUMode;\nclass IRBlock;\n\n/* Disassembler\n   ============\n\nA disassembler is aimed at translating bytecode into IR. \n\nIt is basically a wrapper around capstone disassembly framework.\nEvery disassembler initializes a capstone context with a handle and an cs_insn \npointer in order to disassembly code iteratively instruction by instruction. \n\n*/\n\nclass Disassembler{\npublic:\n    CPUMode _mode;\n    /* Capstone objects */\n    csh _handle;\n    cs_insn * _insn;\n    virtual ~Disassembler();\n    /* Disassemble instructions until next branch instruction and return an IRBlock* */\n    virtual IRBlock* disasm_block(addr_t addr, code_t code, size_t code_size=0xffffffff)=0;\n};\n\nclass DisassemblerX86: public Disassembler{\npublic:\n    DisassemblerX86(CPUMode mode);\n    IRBlock* disasm_block(addr_t addr, code_t code, size_t code_size=0xffffffff);\n};\n\n#endif\n"
  },
  {
    "path": "libropium/include/exception.hpp",
    "content": "#ifndef EXCEPTION_H\n#define EXCEPTION_H\n\n#include <sstream>\n#include <string>\n#include <exception>\n\nusing std::string;\n\n/* From stackoverflow */\nclass QuickFmt{\npublic:\n    QuickFmt() {}\n    ~QuickFmt() {}\n\n    template <typename Type>\n    QuickFmt & operator << (const Type & value)\n    {\n        stream_ << value;\n        return *this;\n    }\n\n    std::string str() const         { return stream_.str(); }\n    operator std::string () const   { return stream_.str(); }\n\n    enum ConvertToString \n    {\n        to_str\n    };\n    std::string operator >> (ConvertToString) { return stream_.str(); }\n\nprivate:\n    std::stringstream stream_;\n\n    QuickFmt(const QuickFmt &);\n    QuickFmt & operator = (QuickFmt &);\n};\n\n/* Generic exception \n * This exception is thrown when an unexpected error or inconsistency occurs\n * and execution should not continue */\nclass runtime_exception: public std::exception {\n    string _msg;\npublic:\n    explicit runtime_exception(string msg): _msg(msg){};\n    virtual const char * what () const throw () {\n      return _msg.c_str();\n   }\n};\n\n/* Expression exception */\nclass expression_exception: public std::exception {\n    string _msg;\npublic:\n    explicit expression_exception(string msg): _msg(msg){};\n    virtual const char * what () const throw () {\n      return _msg.c_str();\n   }\n};\n\nclass ir_exception: public std::exception {\n    string _msg;\npublic:\n    explicit ir_exception(string msg): _msg(msg){};\n    virtual const char * what () const throw () {return _msg.c_str();}\n};\n\nclass il_exception: public std::exception {\n    string _msg;\npublic:\n    explicit il_exception(string msg): _msg(msg){};\n    virtual const char * what () const throw () {return _msg.c_str();}\n};\n\nclass compiler_exception: public std::exception {\n    string _msg;\npublic:\n    explicit compiler_exception(string msg): _msg(msg){};\n    virtual const char * what () const throw () {return 
_msg.c_str();}\n};\n\nclass strategy_exception: public std::exception {\n    string _msg;\npublic:\n    explicit strategy_exception(string msg): _msg(msg){};\n    virtual const char * what () const throw () {return _msg.c_str();}\n};\n\n/* Symbolic Exception */\nclass symbolic_exception: public std::exception {\n    string _msg;\npublic:\n    explicit symbolic_exception(string msg): _msg(msg){};\n    virtual const char * what () const throw () {\n      return _msg.c_str();\n   }\n}; \n\nclass unsupported_instruction_exception: public std::exception {\n    string _msg;\npublic:\n    explicit unsupported_instruction_exception(string msg): _msg(msg){};\n    virtual const char * what () const throw () {\n      return _msg.c_str();\n   }\n}; \n\nclass illegal_instruction_exception: public std::exception {\n    string _msg;\npublic:\n    explicit illegal_instruction_exception(string msg): _msg(msg){};\n    virtual const char * what () const throw () {\n      return _msg.c_str();\n   }\n}; \n\n/* Test exception */ \nclass test_exception : public std::exception {\n   const char * what () const throw () {\n      return \"Unit test failure\";\n   }\n};\n\n#endif\n"
  },
  {
    "path": "libropium/include/expression.hpp",
    "content": "#ifndef EXPRESSION_H\n#define EXPRESSION_H\n\n#include <cstdint>\n#include <string>\n#include <vector>\n#include <memory>\n#include <ostream>\n#include <map>\n#include \"exception.hpp\"\n\nusing std::string;\nusing std::vector;\nusing std::shared_ptr;\nusing std::ostream;\nusing std::map;\n\n/* Type aliasing */\ntypedef uint16_t exprsize_t ;\ntypedef uint32_t hash_t;\ntypedef int64_t cst_t;\ntypedef uint64_t ucst_t;\ntypedef ucst_t addr_t;\n\n/* Types of expressions\n   ====================\n\nDifferent expression types are supported: \n - CST: constant value\n - VAR: symbolic variable, identified by its name\n - MEM: a memory content, identified by an address and the number of bits\n        that are read\n - UNOP/BINOP:  unary and binary operations on expressions\n - EXTRACT: extraction of a bit-interval of another expression, the interval\n            is specified with the values of the higher and lower bits to\n            extract\n - CONCAT: binary concatenation of two expressions\n - BISZ: zero testing. Depending on its mode, it can be equal to 1 IFF \n         the argument is zero, or to 0 IFF the argument is zero\n - UNKNOWN: represents a value which is unknown or can't be computed\n*/\nenum class ExprType {\n    VAR, \n    MEM,\n    EXTRACT, \n    CONCAT,\n    UNOP, \n    BINOP,\n    BISZ,\n    CST,\n    UNKNOWN\n};\nbool operator<(ExprType t1, ExprType t2);\n\n/* Types of operations\n   ===================\n\nDifferent operations on expressions are supported. Their effects are \npretty straightforward. 
\n\nNote that unary and binary operations are a member of the same enum.\nNote that there is no binary SUB operation, only a unary SUB.\n*/\nenum class Op {\n    ADD=0,\n    MUL,\n    MULH,\n    SMULL,\n    SMULH,\n    DIV,\n    SDIV,\n    NEG,\n    AND,\n    OR,\n    XOR,\n    SHL,\n    SHR,\n    MOD,\n    SMOD,\n    NOT,\n    NONE // No operation\n};\n\nstring op_to_str(Op op);\nbool operator<(Op op1, Op op2);\nbool op_is_symetric(Op op);\nbool op_is_associative(Op op);\nbool op_is_left_associative(Op op);\nbool op_is_distributive_over(Op op1, Op op2);\nbool op_is_multiplication(Op op);\n\n\n/* Forward declarations */\nclass ExprObject;\nclass VarContext;\ntypedef shared_ptr<ExprObject> Expr;\n\n/* Generic base class */ \nclass ExprObject{\nfriend class ExprSimplifier;\nprotected:\n    // Hash \n    bool _hashed;\n    hash_t _hash;\n    // Concrete\n    cst_t _concrete;\n    int _concrete_ctx_id;\n    // Simplification\n    Expr _simplified_expr;\n    bool _is_simplified;\npublic:\n    // General\n    const ExprType type;\n    exprsize_t size;\n    vector<Expr> args;\n\n    ExprObject(ExprType type, exprsize_t size, bool _is_simp=false);\n    virtual void get_associative_args(Op op, vector<Expr>& vec){};\n    virtual void get_left_associative_args(Op op, vector<Expr>& vec, Expr& leftmost){};\n    \n    /* Virtual accessors of specialized child classes members */\n    virtual hash_t hash(){throw runtime_exception(\"Called virtual function in ExprObject base class!\");};\n    virtual cst_t cst(){throw runtime_exception(\"Called virtual function in ExprObject base class!\");};\n    virtual const string& name(){throw runtime_exception(\"Called virtual function in ExprObject base class!\");};\n    virtual void replace_name(string& new_name){throw runtime_exception(\"Called virtual function in ExprObject base class!\");};\n    virtual Op op(){throw runtime_exception(\"Called virtual function in ExprObject base class!\");};\n    virtual cst_t mode(){throw 
runtime_exception(\"Called virtual function in ExprObject base class!\");};\n    virtual void print(ostream& out){out << \"???\";};\n    virtual int reg(){throw runtime_exception(\"Called virtual function in ExprObject base class!\");};\n    \n    /* Type */\n    bool is_cst();\n    bool is_var();\n    virtual bool is_reg(int reg){return false;}\n    bool is_mem();\n    virtual bool is_unop(Op op=Op::NONE);\n    virtual bool is_binop(Op op=Op::NONE);\n    bool is_extract();\n    bool is_concat();\n    bool is_bisz();\n    bool is_unknown();\n    \n    /* Concretize */\n    virtual cst_t concretize(VarContext* ctx=nullptr);\n    \n    /* Equality between expressions */\n    bool eq(Expr other);\n    bool neq(Expr other);\n    \n    /* Priority between expressions */\n    bool inf(Expr other);\n    \n    /* Replace constants (used for dependencies) */\n    void replace_var_name(string& curr_name, string& new_name); \n    \n    /* Copy */\n    virtual Expr copy(){throw runtime_exception(\"Called virtual function in ExprObject base class!\");};\n};\n\n/* Child specialized classes */\nclass ExprCst: public ExprObject{\n    cst_t _cst;\npublic:\n    ExprCst(exprsize_t size, cst_t cst);\n    hash_t hash();\n    cst_t cst();\n    virtual cst_t concretize(VarContext* ctx=nullptr);\n    virtual Expr copy();\n    void print(ostream& out);\n};\n\nclass ExprVar: public ExprObject{\n    const string _name;\n    int _num;\npublic:\n    ExprVar(exprsize_t size, string name, int num=0);\n    hash_t hash();\n    bool is_reg(int reg);\n    int reg();\n    virtual cst_t concretize(VarContext* ctx=nullptr);\n    const string& name();\n    void replace_name(string& new_name);\n    virtual Expr copy();\n    void print(ostream& out);\n};\n\nclass ExprMem: public ExprObject{\npublic:\n    ExprMem(exprsize_t size, Expr addr);\n    hash_t hash();\n    virtual cst_t concretize(VarContext* ctx=nullptr);\n    virtual Expr copy();\n    void print(ostream& out);\n};\n\nclass ExprUnop: public 
ExprObject{\n    Op _op;\npublic:\n    ExprUnop(Op op, Expr arg);\n    hash_t hash();\n    Op op();\n    virtual cst_t concretize(VarContext* ctx=nullptr);\n    void print(ostream& out);\n    virtual Expr copy();\n    bool is_unop(Op op);\n};\n\nclass ExprBinop: public ExprObject{\n    Op _op;\npublic:\n    ExprBinop(Op op, Expr left, Expr right);\n    hash_t hash();\n    Op op();\n    void get_associative_args(Op op, vector<Expr>& vec);\n    void get_left_associative_args(Op op, vector<Expr>& vec, Expr& leftmost);\n\n    virtual cst_t concretize(VarContext* ctx=nullptr);\n    void print(ostream& out);\n    virtual Expr copy();\n    bool is_binop(Op op);\n};\n\nclass ExprExtract: public ExprObject{\npublic:\n    ExprExtract(Expr arg, Expr higher, Expr lower);\n    hash_t hash();\n    virtual cst_t concretize(VarContext* ctx=nullptr);\n    virtual Expr copy();\n    void print(ostream& out);\n};\n\nclass ExprConcat: public ExprObject{\npublic:\n    ExprConcat(Expr upper, Expr lower);\n    hash_t hash();\n    virtual cst_t concretize(VarContext* ctx=nullptr);\n    virtual Expr copy();\n    void print(ostream& out);\n};\n\nclass ExprBisz: public ExprObject{\n    cst_t _mode;\npublic:\n    ExprBisz(exprsize_t size, Expr cond, cst_t mode);\n    hash_t hash();\n    cst_t mode();\n    virtual cst_t concretize(VarContext* ctx=nullptr);\n    virtual Expr copy();\n    void print(ostream& out);\n};\n\nclass ExprUnknown: public ExprObject{\npublic:\n    ExprUnknown(exprsize_t size);\n    hash_t hash();\n    virtual cst_t concretize(VarContext* ctx=nullptr);\n    void print(ostream& out);\n};\n\n/* Helper functions to create new expressions */\n// Create from scratch  \nExpr exprcst(exprsize_t size, cst_t cst);\nExpr exprvar(exprsize_t size, string name, int num=-1);\nExpr exprmem(exprsize_t size, Expr addr);\nExpr exprunop(Op op, Expr arg);\nExpr exprbinop(Op op, Expr left, Expr right);\nExpr extract(Expr arg, unsigned long higher, unsigned long lower);\nExpr extract(Expr arg, 
Expr higher, Expr lower);\nExpr concat(Expr upper, Expr lower);\nExpr bisz(exprsize_t size, Expr arg, cst_t mode);\nExpr exprunknown(exprsize_t size);\n\n// Binary operations \nExpr operator+(Expr left, Expr right);\nExpr operator+(Expr left, cst_t right);\nExpr operator+(cst_t left, Expr right);\n\nExpr operator-(Expr left, Expr right);\nExpr operator-(Expr left, cst_t right);\nExpr operator-(cst_t left, Expr right);\n\nExpr operator*(Expr left, Expr right);\nExpr operator*(Expr left, cst_t right);\nExpr operator*(cst_t left, Expr right);\n\nExpr operator/(Expr left, Expr right);\nExpr operator/(Expr left, cst_t right);\nExpr operator/(cst_t left, Expr right);\n\nExpr operator&(Expr left, Expr right);\nExpr operator&(Expr left, cst_t right);\nExpr operator&(cst_t left, Expr right);\n\nExpr operator|(Expr left, Expr right);\nExpr operator|(Expr left, cst_t right);\nExpr operator|(cst_t left, Expr right);\n\nExpr operator^(Expr left, Expr right);\nExpr operator^(Expr left, cst_t right);\nExpr operator^(cst_t left, Expr right);\n\nExpr operator%(Expr left, Expr right);\nExpr operator%(Expr left, cst_t right);\nExpr operator%(cst_t left, Expr right);\n\nExpr operator<<(Expr left, Expr right);\nExpr operator<<(Expr left, cst_t right);\nExpr operator<<(cst_t left, Expr right);\n\nExpr operator>>(Expr left, Expr right);\nExpr operator>>(Expr left, cst_t right);\nExpr operator>>(cst_t left, Expr right);\n\nExpr shl(Expr arg, Expr shift);\nExpr shl(Expr arg, cst_t shift);\nExpr shl(cst_t arg, Expr shift);\n\nExpr shr(Expr arg, Expr shift);\nExpr shr(Expr arg, cst_t shift);\nExpr shr(cst_t arg, Expr shift);\n\nExpr sdiv(Expr left, Expr right);\nExpr sdiv(Expr left, cst_t right);\nExpr sdiv(cst_t left, Expr right);\n\nExpr smod(Expr left, Expr right);\nExpr smod(Expr left, cst_t right);\nExpr smod(cst_t left, Expr right);\n\nExpr mulh(Expr left, Expr right);\nExpr mulh(Expr left, cst_t right);\nExpr mulh(cst_t left, Expr right);\n\nExpr smull(Expr left, Expr right);\nExpr 
smull(Expr left, cst_t right);\nExpr smull(cst_t left, Expr right);\n\nExpr smulh(Expr left, Expr right);\nExpr smulh(Expr left, cst_t right);\nExpr smulh(cst_t left, Expr right);\n\n// Unary operations\nExpr operator~(Expr arg);\nExpr operator-(Expr arg);\n\n/* Printing expressions */\nostream& operator<< (ostream& os, Expr e);\n\n/* Canonizing expressions */\nExpr expr_canonize(Expr e);\n\ncst_t cst_sign_trunc(exprsize_t size, cst_t val);\ncst_t cst_mask(exprsize_t size);\ncst_t cst_sign_extend(exprsize_t size, cst_t val);\n\n\n// Macros to statically cast expressions to access fields if needed\n#define _exprobject_(e) (*(static_cast<ExprObject*>(e.get())))\n#define _cst_(e) (*(static_cast<ExprCst*>(e.get())))\n#define _var_(e) (*(static_cast<ExprVar*>(e.get())))\n#define _mem_(e) (*(static_cast<ExprMem*>(e.get())))\n#define _unop_(e) (*(static_cast<ExprUnop*>(e.get())))\n#define _binop_(e) (*(static_cast<ExprBinop*>(e.get())))\n#define _extract_(e) (*(static_cast<ExprExtract*>(e.get())))\n#define _concat_(e) (*(static_cast<ExprConcat*>(e.get())))\n#define _bisz_(e) (*(static_cast<ExprBisz*>(e.get())))\n#define _unknown_(e) (*(static_cast<ExprUnknown*>(e.get())))\n\n\n/* VarContext\n   ==========\nA VarContext associates a list of concrete values to a list of variables.\nIt used with the variables names as keys for lookup. */\nclass VarContext{\n    map<string, cst_t> varmap;\npublic:\n    int id;\n    VarContext(int id=0);\n    void set(const string& name, cst_t value);\n    cst_t get(const string& name);\n    void remove(const string& name);\n    void print(ostream& os);\n};\n\nostream& operator<<(ostream& os, VarContext& c);\n\n#endif\n"
  },
  {
    "path": "libropium/include/il.hpp",
    "content": "#ifndef IL_HPP\n#define IL_HPP\n\n#include \"arch.hpp\"\n\n/* IL - Intermediate Language\n   ==========================\n  \n  The IL is used to write ROP programs that the ROPCompiler will\n  then try to satisfy using the available gadgets.\n  \n  It is very close to the different kinds of gadgets supported (\n  MOV_REG, MOV_CST, LOAD, STORE, etc) with a few extra types \n  available for convenience (like CST_STORE, LOAD_CST, etc).\n  \n*/\n   \nenum class ILInstructionType{\n    // Register to register\n    MOV_CST,    // reg <- cst\n    MOV_REG,    // reg <- reg\n    AMOV_CST,   // reg <- reg OP cst\n    AMOV_REG,   // reg <- reg OP reg\n    // Read from memory\n    LOAD,       // reg <- mem(reg + offset)\n    ALOAD,      // reg OP<- mem(reg + offset)\n    LOAD_CST,   // reg <- mem(offset)\n    ALOAD_CST,  // reg OP<- mem(offset)\n    // Store to memory\n    STORE,      // mem(reg + offset) <- reg\n    ASTORE,     // mem(reg + offset) OP<- reg\n    CST_STORE,  // mem(offset) <- reg\n    CST_ASTORE,  // mem(offset) OP<- reg\n    STORE_CST,      // mem(reg + offset) <- cst\n    ASTORE_CST,     // mem(reg + offset) OP<- cst\n    CST_STORE_CST,  // mem(offset) <- cst\n    CST_ASTORE_CST,  // mem(offset) OP<- cst\n    CST_STORE_STRING, // mem(offset) <- string \n    // jump\n    JMP,         // PC <- reg\n    // Call functions\n    FUNCTION,\n    SYSCALL,\n    SINGLE_SYSCALL\n};\n\n/* IL - Instruction\n   ================\n  \n  IL instructions are represented by a simple class which holds the\n  instruction type and the list of arguments of the instruction. 
The \n  argument are ordered as defined in the strategy.hpp file (it holds \n  #define enums for gadget arguments and IL arguments since those are\n  very similar in most cases).\n  \n  For example to access the destination register of a MOV_CST instruction\n  we do:   instr.args[PARAM_MOVCST_DST_REG].\n\n  Instructions are build directly from a string, examples are:\n    \"eax += ebx\"\n    \"ecx = 678\"\n    \"esi = ebx ^ 0xdead\"\n    \"[edx+8] *= 2\"\n    \"edx = [ecx]\"\n*/\n\n#define IL_FUNC_ARG_REG 0\n#define IL_FUNC_ARG_CST 1\n\nclass ILInstruction{\npublic:\n    ILInstructionType type;\n    string syscall_name; // Used for SYSCALL\n    int syscall_num; // Use for SYSCALL\n    string str; // Use for STORE_STRING\n    vector<cst_t> args;\n    vector<int> args_type; // Used for FUNCTION\n    ILInstruction(ILInstructionType type, vector<cst_t>* args=nullptr, vector<int>* args_type = nullptr, string syscall_name=\"\",\n            int syscall_num = -1, string str=\"\");\n    ILInstruction(Arch& arch, string instr_str); // raises il_exception if instr_str is invalid\n};\n\n#endif\n"
  },
  {
    "path": "libropium/include/ir.hpp",
    "content": "#ifndef IR_H\n#define IR_H\n\n#include <vector>\n#include <unordered_map>\n#include \"expression.hpp\"\n#include \"simplification.hpp\"\n\nusing std::unordered_map;\n\n/* Type aliasing */ \ntypedef unsigned int IRVar;\ntypedef unsigned int IRBasicBlockId;\n\n/* IR Operations \n ===============\nIR supports basic arithmetic and logical operations, \nstore/load operations, and register 'mov'  \nIt also has two branchment instructions:\n - BCC : conditionnal jump to an IRBasicBlock\n - JCC : conditionnal jump to an IRBlock\nAnd two special instructions: \n - INT\n - SYSCALL\n*/\n\nenum class IROperation{\n    /* Arithmetic and logical operations */\n    ADD,\n    SUB,\n    MUL,\n    MULH,\n    SMULL,\n    SMULH,\n    DIV,\n    SDIV,\n    NEG,\n    AND,\n    OR,\n    XOR,\n    NOT,\n    SHL,\n    SHR,\n    MOD,\n    SMOD,\n    /* Memory read and write */\n    LDM,\n    STM,\n    /* Set register with a value */\n    MOV,\n    /* Conditionnal jumps */\n    BCC, // Internal, to same IRBlock. 
Used for conditionnal instructions\n    JCC, // External, to other IRBlock Used for branch instructions \n    /* Boolean flag set if zero */\n    BISZ,\n    /* Concatenate two variables */\n    CONCAT,\n    /* System calls and interrupt */\n    INT,\n    SYSCALL\n};\nbool iroperation_is_assignment(IROperation& op);\nbool iroperation_is_memory(IROperation& op);\nostream& operator<<(ostream& os, IROperation& op);\n\n/* Values for syscalls */\n#define SYSCALL_X86_INT80 1\n#define SYSCALL_X86_SYSENTER 2\n#define SYSCALL_X64_SYSCALL 3\n\n\n/* IR Operations \n ===============\nIR operands can be of 3 main types.\n    - CST: a constant operand \n    - VAR: a operand representing a register of the disassembled arch\n    - TMP: temporary registers used to model complex operations, they don't \n           correspond to actual processor registers\n    - NONE: represents the fact that there is no argument used */ \nenum class IROperandType{\n    CST,\n    VAR,\n    TMP,\n    NONE\n};\n\nclass IROperand{\n    cst_t _val;\npublic:\n    IROperandType type;\n    exprsize_t high, low, size;\n    \n    IROperand();\n    IROperand(IROperandType t, cst_t val, exprsize_t high, exprsize_t low);\n    \n    bool is_cst();\n    bool is_var();\n    bool is_tmp();\n    bool is_none();\n    \n    cst_t cst();\n    IRVar var();\n    IRVar tmp();\n};\n\nostream& operator<<(ostream& os, IROperand& op);\n/* Helpers to create operands */\nIROperand ir_cst(cst_t val, exprsize_t high, exprsize_t low);\nIROperand ir_var(cst_t num, exprsize_t high, exprsize_t low);\nIROperand ir_tmp(cst_t num, exprsize_t high, exprsize_t low);\nIROperand ir_none();\n\n/* IR Instructions\n   ===============\nIR Instructions are composed of an IROperation, and 3\nIROperands: one destination, and two (optional) sources  */\n\nclass IRInstruction{\npublic:\n    addr_t addr;\n    IROperation op;\n    IROperand dst;\n    IROperand src1;\n    IROperand src2;\n    \n    IRInstruction(IROperation op, IROperand dst, IROperand 
src1, addr_t addr = 0);\n    IRInstruction(IROperation op, IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\n    bool reads_var(IRVar var);\n    bool writes_var(IRVar var);\n    bool uses_var(IRVar var);\n    bool reads_tmp(IRVar tmp);\n    bool writes_tmp(IRVar tmp);\n    vector<IROperand> used_vars_read();\n    vector<IROperand> used_vars_write();\n    vector<IROperand> used_tmps_read();\n    vector<IROperand> used_tmps_write();\n};\n\nostream& operator<<(ostream& os, IRInstruction& ins);\n/* Helpers to create instructions */\nIRInstruction ir_add(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_sub(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_mul(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_mulh(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_smull(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_smulh(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_div(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_sdiv(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_and(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_or(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_xor(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_shl(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_shr(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_mod(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_smod(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_neg(IROperand dst, IROperand src1, addr_t addr = 0);\nIRInstruction ir_not(IROperand dst, IROperand src1, addr_t addr = 
0);\nIRInstruction ir_ldm(IROperand dst, IROperand src1, addr_t addr = 0);\nIRInstruction ir_stm(IROperand dst, IROperand src1, addr_t addr = 0);\nIRInstruction ir_mov(IROperand dst, IROperand src1, addr_t addr = 0);\nIRInstruction ir_bcc(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_jcc(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_bisz(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_concat(IROperand dst, IROperand src1, IROperand src2, addr_t addr = 0);\nIRInstruction ir_int(IROperand num, IROperand ret, addr_t addr = 0);\nIRInstruction ir_syscall(IROperand type, IROperand ret, addr_t addr = 0);\n\n/* IRContext \n   =========\nHolds current expressions for every register */\n\nclass IRContext{\nfriend class BreakpointManager;\n    Expr* _var;\n    int _nb_var;\npublic:\n    IRContext();\n    IRContext(IRVar nb_var);\n    ~IRContext();\n    int nb_vars();\n    /* Get and set IR variables */\n    void set(IRVar num, Expr e);\n    Expr get(IRVar num);\n};\nostream& operator<<(ostream& os, IRContext& ctx);\n\n/* MemEngine \n   ========== */\nclass MemContext{\npublic:\n    unordered_map<Expr, Expr> writes;\n    ExprSimplifier* simp;\n    \n    MemContext();\n    void write(Expr addr, Expr expr);\n    Expr read(Expr addr, int octets);\n    ~MemContext();\n};\nostream& operator<<(ostream& os, MemContext& ctx);\n\n/* Type aliasing */\ntypedef vector<IRInstruction> IRBasicBlock;\n\n\n/* IRBlock\n   =======\n   An IRBlock represents a basic block in assembly. By basic block we mean\n   a sequence of contiguous instructions that are executed sequentially (so\n   no branchement instruction in the middle of a basic block, only at the\n   end).\n\n    An IRBlock is **uniquely** identifier by its start address ! 
There is a 'name'\n    field but it's just here for convenience.\n\n    An IRBlock is made of several IRBasicBlocks (which are just lists of\n    IRInstructions). It also holds several \"meta\" informations like the number\n    of tmp ir vars it holds, it's size in IR, in raw assembly, the branchment\n    type it finishes with, etc.\n*/\nclass IRBlock{\npublic:\n    vector<IRBasicBlock> _bblocks;\n    int _nb_tmp_vars; // Number of tmp variables used in the block\n    int _nb_instr, _nb_instr_ir;\n    addr_t start_addr, end_addr;\n    string name;\n    unsigned int ir_size;\n    unsigned int raw_size;\n    cst_t max_sp_inc;\n    bool known_max_sp_inc;\n    bool dereferenced_regs[128];\n    bool ends_with_syscall;\n    bool ends_with_int80;\n\n    addr_t branch_target[2]; // [0]: target when condition expression is 0\n                             // [1]: target when condition expression is != 0\n    IRBlock(string name, addr_t start=0, addr_t end=0);\n    void add_instr(IRBasicBlockId bblock, IRInstruction instr);\n    /* Manage IR Basic Blocks */\n    IRBasicBlockId new_bblock();\n    IRBasicBlock& get_bblock(IRBasicBlockId id);\n    int nb_bblocks();\n    vector<IRBasicBlock>& bblocks();\n};\n\nostream& operator<<(ostream& os, IRBlock& blk);\n\n\n\n\n#endif\n\n"
  },
  {
    "path": "libropium/include/ropchain.hpp",
    "content": "#ifndef ROPCHAIN_H\n#define ROPCHAIN_H\n\n#include \"symbolic.hpp\"\n#include \"arch.hpp\"\n#include \"utils.hpp\"\n#include <string>\n\nusing std::string;\n\n\n/* ======== Gadgets ========== */\n\nenum class BranchType{\n    RET,\n    JMP,\n    ANY, // Any of RET or JMP, not SYSCALL or INT80\n    SYSCALL,\n    INT80,\n    NONE,\n};\n\nclass Gadget{\npublic:\n    int id; // To be set by the db when gadget is added\n    int bin_num; // Identifies the binary/library it comes from\n    string asm_str, _hex_str;\n    Semantics* semantics;\n    vector<addr_t> addresses; \n    /* Number of instructions in the gadget */\n    int nb_instr, nb_instr_ir; \n    // Info about gadget semantics\n    cst_t sp_inc;\n    cst_t max_sp_inc;\n    BranchType branch_type;\n    reg_t jmp_reg;\n    bool modified_regs[NB_REGS_MAX];\n    bool dereferenced_regs[NB_REGS_MAX];\n\n    // Constructor\n    Gadget();\n    ~Gadget();\n    // Other\n    void add_address(addr_t addr);\n    void print(ostream& os);\n    bool lthan(Gadget& other);\n};\nostream& operator<<(ostream& os, Gadget& g);\n\n\n/* ======== ROPChain ========== */\n\nenum class ROPItemType{\n    GADGET,\n    PADDING,\n    GADGET_ADDRESS\n};\n\nclass ROPItem{\npublic:\n    ROPItemType type;\n    Gadget* gadget; // If gadget\n    addr_t addr;\n    cst_t value; // If cst or padding\n    string msg;\n    \n    ROPItem(addr_t a, Gadget* g, string m=\"\"):type(ROPItemType::GADGET), addr(a), value(-1), gadget(g), msg(m){};\n    ROPItem(ROPItemType t, cst_t v, string m=\"\"):type(t), value(v), msg(m), addr(0), gadget(nullptr){};\n};\n\nclass ROPChain{\npublic:\n    Arch *arch; // Not owned\n    vector<ROPItem> items;\n\n    ROPChain(Arch* arch);\n    void add_gadget(addr_t addr, Gadget* gadget);\n    void add_padding(cst_t val, string m=\"\");\n    void add_gadget_address(cst_t addr, string m = \"\");\n    void add_chain(ROPChain& other);\n    int len();\n    void print_pretty(ostream& os, string tab=\"\");\n    void 
print_python(ostream& os, string tab=\"\");\n    void dump_raw(vector<uint8_t>& bytes);\n};\n\nostream& operator<<(ostream& os, ROPChain& ropchain);\n\n#endif\n"
  },
  {
    "path": "libropium/include/ropium.hpp",
    "content": "#ifndef ROPIUM_H\n#define ROPIUM_H\n\n#include \"expression.hpp\"\n#include \"simplification.hpp\"\n#include \"arch.hpp\"\n#include \"assertion.hpp\"\n#include \"memory.hpp\"\n#include \"ir.hpp\"\n#include \"il.hpp\"\n#include \"constraint.hpp\"\n#include \"disassembler.hpp\"\n#include \"ropchain.hpp\"\n#include \"exception.hpp\"\n#include \"database.hpp\"\n#include \"symbolic.hpp\"\n#include \"strategy.hpp\"\n#include \"utils.hpp\"\n\n#endif\n"
  },
  {
    "path": "libropium/include/simplification.hpp",
    "content": "#ifndef SIMPLIFICATION_H\n#define SIMPLIFICATION_H\n\n#include \"expression.hpp\"\n#include <vector>\n\nusing std::vector; \n\n/* Forward declaration */ \nclass ExprSimplifier;\n\n/* Type aliasing */\ntypedef Expr (*ExprSimplifierFunc)(Expr);\ntypedef Expr (*RecExprSimplifierFunc)(Expr, ExprSimplifier&);\n\n/* Expression simplifier */\nclass ExprSimplifier{\nprotected:\n    vector<ExprSimplifierFunc> simplifiers;\n    vector<RecExprSimplifierFunc> rec_simplifiers;\n    vector<RecExprSimplifierFunc> restruct_simplifiers;\n    Expr run_simplifiers(Expr e);\npublic:\n    ExprSimplifier();\n    Expr simplify(Expr e);\n    void add(ExprSimplifierFunc func);\n    void add(RecExprSimplifierFunc func);\n    void add_restruct(RecExprSimplifierFunc func);\n};\n\nExprSimplifier* NewDefaultExprSimplifier();\n\n/* Simplification functions */\nExpr es_constant_folding(Expr e);\nExpr es_neutral_elements(Expr e);\nExpr es_absorbing_elements(Expr e);\nExpr es_arithmetic_properties(Expr e);\nExpr es_involution(Expr e);\nExpr es_extract_patterns(Expr e);\nExpr es_basic_transform(Expr e);\nExpr es_logical_properties(Expr e);\nExpr es_concat_patterns(Expr e);\nExpr es_arithmetic_factorize(Expr e);\nExpr es_generic_factorize(Expr e);\nExpr es_generic_distribute(Expr e);\nExpr es_deep_associative(Expr e, ExprSimplifier& simp);\n\n#endif\n"
  },
  {
    "path": "libropium/include/strategy.hpp",
    "content": "#ifndef STRATEGY_H\n#define STRATEGY_H\n\n#include \"database.hpp\"\n#include \"expression.hpp\"\n#include \"constraint.hpp\"\n#include \"assertion.hpp\"\n#include <vector>\n#include <string>\n#include <array>\n\nusing std::stringstream;\nusing std::vector;\nusing std::array;\n\n/* Forward declaration */\ntypedef int node_t;\ntypedef int param_t;\n\nenum class ParamType{\n    CST,\n    REG,\n    OP,\n    NONE\n};\n\nstruct ParamDep{\n    node_t node;\n    param_t param_type;\n};\n\nclass Param{\npublic:\n    ParamType type; \n    string name; // Name for the param (used for 'free' constants only)\n    // Value\n    cst_t value; // Used to put constant OR regnum\n    // Dependencies\n    vector<ParamDep> deps;\n    Expr expr; // For constants only\n    bool is_fixed;\n    bool is_data_link; \n\n    Param():type(ParamType::NONE), name(\"\"), value(-1), expr(nullptr), is_fixed(true), is_data_link(false){};\n\n    void add_dep(node_t n, param_t p){\n        // Check if already present\n        for( ParamDep& dep : deps )\n            if( dep.node == n && dep.param_type == p )\n                return;\n        // If not present, add the dependency\n        deps.push_back(ParamDep{n, p});\n    };\n    \n    bool depends_on(node_t n){\n        for( ParamDep& dep : deps ){\n            if( dep.node == n )\n                return true;\n        }\n        return false;\n    };\n    \n    // Fixed or free register\n    void make_reg(int reg, bool fixed=true){\n        type = ParamType::REG;\n        value = reg;\n        is_fixed = fixed;\n        deps.clear();\n        expr = nullptr;\n        is_data_link = false;\n    };\n    \n    // Dependent register\n    void make_reg(node_t dn, int dpt){\n        type = ParamType::REG;\n        value = -1;\n        is_fixed = false;\n        deps.clear();\n        add_dep(dn, dpt);\n        expr = nullptr;\n        is_data_link = false;\n    };\n    \n    // Fixed or free constant\n    void make_cst(cst_t val, string 
n, bool fixed=true){\n        type = ParamType::CST;\n        name = n;\n        value = val;\n        is_fixed = fixed;\n        deps.clear();\n        expr = nullptr;\n        is_data_link = false;\n    };\n    \n    // Dependent constant\n    void make_cst(node_t dn, int dpt, Expr e, string n){\n        type = ParamType::CST;\n        name = n;\n        value = 0;\n        is_fixed = false;\n        deps.clear();\n        add_dep(dn, dpt);\n        expr = e;\n        is_data_link = false;\n    };\n\n    // Operator\n    void make_op(Op op){\n        type = ParamType::OP;\n        value = (int)op;\n        is_fixed = true;\n        deps.clear();\n        expr = nullptr;\n        is_data_link = false;\n    };\n\n    bool is_dependent(){return !is_fixed && !deps.empty();};\n    bool is_free(){return !is_dependent() && !is_fixed;};\n    bool is_cst(){return type == ParamType::CST;};\n    bool is_reg(){return type == ParamType::REG;};\n};\n\nostream& operator<<(ostream& os, Param& param);\n\n\nstruct EdgeSet{\n    vector<node_t> in;\n    vector<node_t> out;\n};\n\nclass UniqueNameGenerator{\nprivate:\n    int n;\npublic:\n    UniqueNameGenerator():n(0){};\n    string new_name(string& name){\n        stringstream ss;\n        ss << name << \"_\" << std::dec << n;\n        n++;\n        return ss.str();\n    };\n};\n\n/* Different kinds parameters for nodes/IL instructions\n   ====================================================\n   WARNING: their values have to match the place they have in the tuple\n   when the gadgets are addded in the database !\n*/\n#define MAX_PARAMS 9\n\n#define PARAM_MOVREG_DST_REG 0\n#define PARAM_MOVREG_SRC_REG 1\n#define PARAM_MOVREG_GADGET_ADDR 2\n#define PARAM_MOVREG_GADGET_SP_INC 3\n#define PARAM_MOVREG_GADGET_JMP_REG 4\n#define PARAM_MOVREG_GADGET_SP_DELTA 5\n#define PARAM_MOVREG_DATA_LINK 6\n#define NB_PARAM_MOVREG 7\n\n#define PARAM_MOVCST_DST_REG 0\n#define PARAM_MOVCST_SRC_CST 1\n#define PARAM_MOVCST_GADGET_ADDR 2\n#define 
PARAM_MOVCST_GADGET_SP_INC 3\n#define PARAM_MOVCST_GADGET_JMP_REG 4\n#define PARAM_MOVCST_GADGET_SP_DELTA 5\n#define PARAM_MOVCST_DATA_LINK 6\n#define NB_PARAM_MOVCST 7\n\n#define PARAM_AMOVCST_DST_REG 0\n#define PARAM_AMOVCST_SRC_REG 1\n#define PARAM_AMOVCST_SRC_OP 2\n#define PARAM_AMOVCST_SRC_CST 3\n#define PARAM_AMOVCST_GADGET_ADDR 4\n#define PARAM_AMOVCST_GADGET_SP_INC 5\n#define PARAM_AMOVCST_GADGET_JMP_REG 6\n#define PARAM_AMOVCST_GADGET_SP_DELTA 7\n#define PARAM_AMOVCST_DATA_LINK 8\n#define NB_PARAM_AMOVCST 9\n\n#define PARAM_AMOVREG_DST_REG 0\n#define PARAM_AMOVREG_SRC_REG1 1\n#define PARAM_AMOVREG_SRC_OP 2\n#define PARAM_AMOVREG_SRC_REG2 3\n#define PARAM_AMOVREG_GADGET_ADDR 4\n#define PARAM_AMOVREG_GADGET_SP_INC 5\n#define PARAM_AMOVREG_GADGET_JMP_REG 6\n#define PARAM_AMOVREG_GADGET_SP_DELTA 7\n#define PARAM_AMOVREG_DATA_LINK 8\n#define NB_PARAM_AMOVREG 9\n\n#define PARAM_LOAD_DST_REG 0\n#define PARAM_LOAD_SRC_ADDR_REG 1\n#define PARAM_LOAD_SRC_ADDR_OFFSET 2\n#define PARAM_LOAD_GADGET_ADDR 3\n#define PARAM_LOAD_GADGET_SP_INC 4\n#define PARAM_LOAD_GADGET_JMP_REG 5\n#define PARAM_LOAD_GADGET_SP_DELTA 6\n#define PARAM_LOAD_DATA_LINK 7\n#define NB_PARAM_LOAD 8\n\n#define PARAM_ALOAD_DST_REG 0\n#define PARAM_ALOAD_OP 1\n#define PARAM_ALOAD_SRC_ADDR_REG 2\n#define PARAM_ALOAD_SRC_ADDR_OFFSET 3\n#define PARAM_ALOAD_GADGET_ADDR 4\n#define PARAM_ALOAD_GADGET_SP_INC 5\n#define PARAM_ALOAD_GADGET_JMP_REG 6\n#define PARAM_ALOAD_GADGET_SP_DELTA 7\n#define PARAM_ALOAD_DATA_LINK 8\n#define NB_PARAM_ALOAD 9\n\n#define PARAM_LOADCST_DST_REG 0\n#define PARAM_LOADCST_SRC_ADDR_OFFSET 1\n#define PARAM_LOADCST_GADGET_ADDR 2\n#define PARAM_LOADCST_GADGET_SP_INC 3\n#define PARAM_LOADCST_GADGET_JMP_REG 4\n#define PARAM_LOADCST_GADGET_SP_DELTA 5\n#define NB_PARAM_LOADCST 6\n\n#define PARAM_ALOADCST_DST_REG 0\n#define PARAM_ALOADCST_OP 1\n#define PARAM_ALOADCST_SRC_ADDR_OFFSET 2\n#define PARAM_ALOADCST_GADGET_ADDR 3\n#define PARAM_ALOADCST_GADGET_SP_INC 4\n#define 
PARAM_ALOADCST_GADGET_JMP_REG 5\n#define PARAM_ALOADCST_GADGET_SP_DELTA 6\n#define NB_PARAM_ALOADCST 7\n\n#define PARAM_STORE_DST_ADDR_REG 0\n#define PARAM_STORE_DST_ADDR_OFFSET 1\n#define PARAM_STORE_SRC_REG 2\n#define PARAM_STORE_GADGET_ADDR 3\n#define PARAM_STORE_GADGET_SP_INC 4\n#define PARAM_STORE_GADGET_JMP_REG 5\n#define PARAM_STORE_GADGET_SP_DELTA 6\n#define PARAM_STORE_DATA_LINK 7\n#define NB_PARAM_STORE 8\n\n#define PARAM_CSTSTORE_DST_ADDR_OFFSET 0\n#define PARAM_CSTSTORE_SRC_REG 1\n#define PARAM_CSTSTORE_GADGET_ADDR 2\n#define PARAM_CSTSTORE_GADGET_SP_INC 3\n#define PARAM_CSTSTORE_GADGET_JMP_REG 4\n#define PARAM_CSTSTORE_GADGET_SP_DELTA 5\n#define NB_PARAM_CSTSTORE 6\n\n#define PARAM_ASTORE_DST_ADDR_REG 0\n#define PARAM_ASTORE_DST_ADDR_OFFSET 1\n#define PARAM_ASTORE_OP 2\n#define PARAM_ASTORE_SRC_REG 3\n#define PARAM_ASTORE_GADGET_ADDR 4\n#define PARAM_ASTORE_GADGET_SP_INC 5\n#define PARAM_ASTORE_GADGET_JMP_REG 6\n#define PARAM_ASTORE_GADGET_SP_DELTA 7\n#define PARAM_ASTORE_DATA_LINK 8\n#define NB_PARAM_ASTORE 9\n\n#define PARAM_CSTASTORE_DST_ADDR_OFFSET 0\n#define PARAM_CSTASTORE_OP 1\n#define PARAM_CSTASTORE_SRC_REG 2\n#define PARAM_CSTASTORE_GADGET_ADDR 3\n#define PARAM_CSTASTORE_GADGET_SP_INC 4\n#define PARAM_CSTASTORE_GADGET_JMP_REG 5\n#define PARAM_CSTASTORE_GADGET_SP_DELTA 6\n#define NB_PARAM_CSTASTORE 7\n\n\n#define PARAM_STORECST_DST_ADDR_REG 0\n#define PARAM_STORECST_DST_ADDR_OFFSET 1\n#define PARAM_STORECST_SRC_CST 2\n#define PARAM_STORECST_GADGET_ADDR 3\n#define PARAM_STORECST_GADGET_SP_INC 4\n#define PARAM_STORECST_GADGET_JMP_REG 5\n#define PARAM_STORECST_GADGET_SP_DELTA 6\n#define NB_PARAM_STORECST 7\n\n#define PARAM_CSTSTORECST_DST_ADDR_OFFSET 0\n#define PARAM_CSTSTORECST_SRC_CST 1\n#define PARAM_CSTSTORECST_GADGET_ADDR 2\n#define PARAM_CSTSTORECST_GADGET_SP_INC 3\n#define PARAM_CSTSTORECST_GADGET_JMP_REG 4\n#define PARAM_CSTSTORECST_GADGET_SP_DELTA 5\n#define NB_PARAM_CSTSTORECST 6\n\n#define PARAM_ASTORECST_DST_ADDR_REG 0\n#define 
PARAM_ASTORECST_DST_ADDR_OFFSET 1\n#define PARAM_ASTORECST_OP 2\n#define PARAM_ASTORECST_SRC_CST 3\n#define PARAM_ASTORECST_GADGET_ADDR 4\n#define PARAM_ASTORECST_GADGET_SP_INC 5\n#define PARAM_ASTORECST_GADGET_JMP_REG 6\n#define PARAM_ASTORECST_GADGET_SP_DELTA 7\n#define NB_PARAM_ASTORECST 8\n\n#define PARAM_CSTASTORECST_DST_ADDR_OFFSET 0\n#define PARAM_CSTASTORECST_OP 1\n#define PARAM_CSTASTORECST_SRC_CST 2\n#define PARAM_CSTASTORECST_GADGET_ADDR 3\n#define PARAM_CSTASTORECST_GADGET_SP_INC 4\n#define PARAM_CSTASTORECST_GADGET_JMP_REG 5\n#define PARAM_CSTASTORECST_GADGET_SP_DELTA 6\n#define NB_PARAM_CSTASTORECST 7\n\n#define PARAM_FUNCTION_ADDR 0\n#define PARAM_FUNCTION_ARGS 1\n\n#define PARAM_SYSCALL_ARGS 0 // For IL\n#define PARAM_SYSCALL_GADGET_ADDR 0 // For gadget\n#define PARAM_SYSCALL_GADGET_SP_INC 1\n#define PARAM_SYSCALL_GADGET_JMP_REG 2\n#define PARAM_SYSCALL_GADGET_SP_DELTA 3\n#define PARAM_SYSCALL_DATA_LINK 4\n#define NB_PARAM_SYSCALL 5\n\n#define PARAM_INT80_ARGS 0 // For IL\n#define PARAM_INT80_GADGET_ADDR 0 // For gadget\n#define PARAM_INT80_GADGET_SP_INC 1\n#define PARAM_INT80_GADGET_JMP_REG 2\n#define PARAM_INT80_GADGET_SP_DELTA 3\n#define PARAM_INT80_DATA_LINK 4\n#define NB_PARAM_INT80 5\n\n#define PARAM_CSTSTORE_STRING_ADDR_OFFSET 0\n\ntypedef struct {\n    Param offset;\n    Param value;\n} ROPPadding;\n\nclass Node;\n\nclass NodeValidPointers{\n    vector<param_t> _params;\npublic:\n    void add_valid_pointer(param_t param);\n    void to_assertion(Node& node, Assertion* assertion);\n    void clear();\n};\n\nclass NodeAssertion{\npublic:\n    NodeValidPointers valid_pointers;\n    void to_assertion(Node& node, Assertion* a);\n    void clear();\n};\n\n\n// Callback for custom constraints called to filter gadgets on each node\nclass Node;\nclass StrategyGraph;\ntypedef bool (*constraint_callback_t)(Node* node, StrategyGraph* graph, Arch* arch);\n\n// Commonly used node constraints\nbool constraint_branch_type(Node* node, StrategyGraph* graph, 
Arch* arch);\n\nclass Node{\npublic:\n    int id;\n    bool is_indirect;\n    bool is_disabled;\n    GadgetType type;\n    // Edges\n    EdgeSet strategy_edges; \n    EdgeSet param_edges;\n    EdgeSet interference_edges;\n    // Parameters\n    Param params[MAX_PARAMS];\n    // Affected gadget\n    Gadget* affected_gadget;\n    // Constraint\n    vector<constraint_callback_t> strategy_constraints;\n    vector<constraint_callback_t> assigned_gadget_constraints;\n    // Branch type (RET, JMP, ANY, ...)\n    BranchType branch_type;\n    // Gadget paddings\n    vector<ROPPadding> special_paddings;\n    // Assertions\n    NodeAssertion node_assertion;\n    Assertion assertion;\n    // Mandatory Following node\n    node_t mandatory_following_node;\n\n    Node(int i, GadgetType t);\n    int nb_params();\n    bool has_free_param();\n    bool has_mandatory_following_node();\n    // Manage edges\n    void add_incoming_strategy_edge(node_t src_node);\n    void add_incoming_param_edge(node_t src_node);\n    void add_outgoing_strategy_edge(node_t dst_node);\n    void add_outgoing_param_edge(node_t dst_node);\n    void remove_incoming_strategy_edge(node_t src_node);\n    void remove_incoming_param_edge(node_t src_node);\n    void remove_outgoing_strategy_edge(node_t dst_node);\n    void remove_outgoing_param_edge(node_t dst_node);\n\n    bool is_initial_param(param_t param);\n    bool is_final_param(param_t param);\n    bool is_src_param(param_t param);\n    bool is_generic_param(param_t param);\n\n    int get_param_num_gadget_sp_inc();\n    int get_param_num_gadget_addr();\n    int get_param_num_gadget_jmp_reg();\n    int get_param_num_gadget_sp_delta();\n    int get_param_num_src_reg();\n    int get_param_num_dst_reg();\n    int get_param_num_src_addr_offset();\n    int get_param_num_src_addr_reg();\n    int get_param_num_dst_addr_offset();\n    int get_param_num_dst_addr_reg();\n    int get_param_num_data_link();\n    bool has_dst_reg_param();\n    bool 
has_dst_addr_reg_param();\n\n    bool assign_gadget(Gadget* gadget, Arch* arch=nullptr, Constraint* constraint=nullptr);\n    void apply_assertion();\n    bool modifies_reg(int reg_num);\n};\n\nostream& operator<<(ostream& os, Node& node);\n\nclass InterferencePoint {\npublic:\n    node_t interfering_node;\n    node_t start_node;\n    node_t end_node;\n    InterferencePoint(node_t i, node_t s, node_t e):interfering_node(i), start_node(s), end_node(e){};\n};\n\n/* Strategy graph */\nclass StrategyGraph{\nprivate:\n    UniqueNameGenerator name_generator;\n    void _dfs_strategy_explore(vector<node_t>& marked, node_t n);\n    void _dfs_params_explore(vector<node_t>& marked, node_t n);\n    bool _dfs_scheduling_explore(vector<node_t>& marked, node_t n);\n    void _resolve_param(Param& param);\n    void _resolve_all_params(node_t n);\n    const vector<Gadget*>& _get_matching_gadgets(GadgetDB& db, node_t node);\n    PossibleGadgets* _get_possible_gadgets(GadgetDB& db, node_t n);\n    bool _check_strategy_constraints(Node& node, Arch* arch);\n    bool _check_assigned_gadget_constraints(Node& node, Arch* arch);\n    bool _check_special_padding_constraints(Node& node, Arch* arch, Constraint* constraint=nullptr);\n    bool _do_scheduling(int interference_idx=0);\n    \n    bool has_gadget_selection;\n    VarContext params_ctx;\n    int _depth;\n    vector<InterferencePoint> interference_points;\n\npublic:\n    string _history;\n\n    int size;\n    vector<Node> nodes;\n    vector<node_t> dfs_strategy;\n    vector<node_t> dfs_params;\n    vector<node_t> dfs_scheduling;\n\n    StrategyGraph();\n    // Create new nodes/edges\n    node_t new_node(GadgetType t);\n    string new_name(string base);\n    void disable_node(node_t node);\n    void redirect_param_edges(node_t curr_node, param_t curr_param_type, node_t new_node, param_t new_param_type);\n    void redirect_incoming_strategy_edges(node_t curr_node, node_t new_node);\n    void redirect_outgoing_strategy_edges(node_t 
curr_node, node_t new_node);\n    void redirect_generic_param_edges(node_t curr_node, node_t new_node);\n    void add_strategy_edge(node_t from, node_t to);\n    void add_param_edge(node_t from, node_t to);\n    void add_interference_edge(node_t from, node_t to);\n    void update_param_edges();\n    void update_size();\n    void clear_interference_edges(node_t n);\n    bool modifies_reg(node_t n, int reg_num, bool check_following_node=false);\n    // Strategy rules\n    bool rule_mov_cst_pop(node_t n, Arch* arch);\n    bool rule_generic_transitivity(node_t n);\n    bool rule_generic_src_transitivity(node_t n);\n    bool rule_generic_adjust_jmp(node_t n, Arch* arch);\n    bool rule_adjust_load(node_t n, Arch* arch);\n    bool rule_adjust_store(node_t n, Arch* arch);\n    // Ordering\n    void compute_dfs_strategy();\n    void compute_dfs_params();\n    bool compute_dfs_scheduling();\n\n    // Gadget selection\n    bool select_gadgets(GadgetDB& db, Constraint* constraint=nullptr, Arch* arch=nullptr, node_t dfs_idx=-1);\n    ROPChain* get_ropchain(Arch* arch, Constraint* constraint=nullptr);\n\n    // Scheduling\n    void compute_interference_points();\n    bool schedule_gadgets();\n    bool has_dependent_param(node_t node, param_t param);\n\n    // Copy\n    StrategyGraph* copy();\n};\n\nostream& operator<<(ostream& os, StrategyGraph& graph);\n\n#endif\n"
  },
  {
    "path": "libropium/include/symbolic.hpp",
    "content": "#ifndef SYMBOLIC_H\n#define SYMBOLIC_H\n\n#include \"ir.hpp\"\n#include \"expression.hpp\"\n#include \"simplification.hpp\"\n#include \"arch.hpp\"\n\nusing std::tuple;\nusing std::string;\n\n/* Semantics\n   =========  */\nclass Semantics{\npublic:\n    IRContext* regs; // Takes ownership\n    MemContext* mem; // Takes ownership\n    Semantics(IRContext* regs, MemContext* mem);\n    void simplify();\n    ~Semantics();\n};\n\nostream& operator<<(ostream&, Semantics& s);\n\n/* SymbolicEngine\n   ============== */\n\nclass SymbolicEngine{\npublic:\n    Arch* arch;\n\n    SymbolicEngine(ArchType arch);\n    ~SymbolicEngine();\n    Semantics* execute_block(IRBlock* block);\n};\n\n#endif\n"
  },
  {
    "path": "libropium/include/systems.hpp",
    "content": "#ifndef SYSTEMS_H\n#define SYSTEMS_H\n\n#include <string>\n#include \"arch.hpp\"\n\nusing std::string;\n\nenum class System{\n    LINUX,\n    WINDOWS,\n    NONE\n};\n\n// Specification of a syscall\nclass SyscallDef{\npublic:\n    string name;\n    int nb_args;\n    cst_t num;\n    SyscallDef(string n, cst_t sysn, int nb):name(n), nb_args(nb), num(sysn){};\n};\n\n// Get syscall definition by name\nSyscallDef* get_syscall_def(ArchType arch, System sys, string name);\n\n#endif\n"
  },
  {
    "path": "libropium/include/utils.hpp",
    "content": "#ifndef UTILS_H\n#define UTILS_H\n\n#include <string>\n#include <cstdint>\n#include <vector>\n#include \"expression.hpp\"\n\nusing std::string;\nusing std::vector;\n\n/* ======== Raw gadgets interface ======== */\nclass RawGadget{\npublic:\n    RawGadget(){};\n    RawGadget(string r, uint64_t a):raw(r), addr(a){}\n    string raw;\n    uint64_t addr;\n};\n\n// Read gadgets from file\nvector<RawGadget>* raw_gadgets_from_file(string filename);\n// Write gadgets to file from ROPgadget output\nbool ropgadget_to_file(string filename, string ropgadget_tmp_file, string bin);\n\n/* ========= Support for hashing tuples ========== */\n#include <tuple>\n// function has to live in the std namespace \n// so that it is picked up by argument-dependent name lookup (ADL).\nnamespace std{\n    namespace\n    {\n        // Code from boost\n        // Reciprocal of the golden ratio helps spread entropy\n        //     and handles duplicates.\n        // See Mike Seymour in magic-numbers-in-boosthash-combine:\n        //     https://stackoverflow.com/questions/4948780\n\n        template <class T>\n        inline void hash_combine(std::size_t& seed, T const& v)\n        {\n            seed ^= hash<T>()(v) + 0x9e3779b9 + (seed<<6) + (seed>>2);\n        }\n\n        // Recursive template code derived from Matthieu M.\n        template <class Tuple, size_t Index = std::tuple_size<Tuple>::value - 1>\n        struct HashValueImpl\n        {\n          static void apply(size_t& seed, Tuple const& tuple)\n          {\n            HashValueImpl<Tuple, Index-1>::apply(seed, tuple);\n            hash_combine(seed, get<Index>(tuple));\n          }\n        };\n\n        template <class Tuple>\n        struct HashValueImpl<Tuple,0>\n        {\n          static void apply(size_t& seed, Tuple const& tuple)\n          {\n            hash_combine(seed, get<0>(tuple));\n          }\n        };\n    }\n\n    template <typename ... 
TT>\n    struct hash<std::tuple<TT...>> \n    {\n        size_t\n        operator()(std::tuple<TT...> const& tt) const\n        {                                              \n            size_t seed = 0;                             \n            HashValueImpl<std::tuple<TT...> >::apply(seed, tt);    \n            return seed;                                 \n        }                                              \n\n    };\n}\n\n\n/* ========= Convert tuples to array/vector ================= */\ntemplate<int... Indices>\nstruct indices {\n    using next = indices<Indices..., sizeof...(Indices)>;\n};\n\ntemplate<int Size>\nstruct build_indices {\n    using type = typename build_indices<Size - 1>::type::next;\n};\n\ntemplate<>\nstruct build_indices<0> {\n    using type = indices<>;\n};\n\ntemplate<typename T>\nusing Bare = typename std::remove_cv<typename std::remove_reference<T>::type>::type;\n\ntemplate<typename Tuple>\nconstexpr\ntypename build_indices<std::tuple_size<Bare<Tuple>>::value>::type\nmake_indices()\n{ return {}; }\n\ntemplate<typename Tuple, int... Indices>\nstd::array<\n  cst_t,\n    std::tuple_size<Bare<Tuple>>::value\n>\nto_array(Tuple&& tuple, indices<Indices...>)\n{\n    using std::get;\n    return {{ get<Indices>(std::forward<Tuple>(tuple))... 
}};\n}\n\ntemplate<typename Tuple>\nauto tuple_to_array(Tuple&& tuple)\n-> decltype( to_array(std::declval<Tuple>(), make_indices<Tuple>()) )\n{\n    return to_array(std::forward<Tuple>(tuple), make_indices<Tuple>());\n}\n\ntemplate<typename Tuple>\nvector<cst_t> tuple_to_vector(Tuple&& tuple)\n{\n    auto array = tuple_to_array(tuple);\n    vector<cst_t> res;\n    for( auto& a : array ){\n        res.push_back(a);\n    }\n    return res;\n}\n\n/* =============== Printing stuff =============== */\n#define DEFAULT_ERROR_COLOR_ANSI  \"\\033[91m\"\n#define DEFAULT_BOLD_COLOR_ANSI  \"\\033[1m\"\n#define DEFAULT_SPECIAL_COLOR_ANSI  \"\\033[93m\"\n#define DEFAULT_PAYLOAD_COLOR_ANSI \"\\033[96m\"\n#define DEFAULT_EXPLOIT_DESCRIPTION_ANSI  \"\\033[95m\"\n#define DEFAULT_END_COLOR_ANSI \"\\033[0m\"\n\nstring str_bold(string s);\nstring str_special(string s);\n\nstring value_to_hex_str(int octets, addr_t addr);\n\nvoid disable_colors();\nvoid enable_colors();\n\n/* ========= Catching ctrl+C ============= */\nvoid set_sigint_handler();\nvoid unset_signint_handler();\nbool is_pending_sigint();\nvoid notify_sigint_handled();\n\n\n#endif\n"
  },
  {
    "path": "libropium/ir/ir.cpp",
    "content": "#include \"ir.hpp\"\n#include \"exception.hpp\"\n#include <iostream>\n#include <cassert>\n#include <cstring>\n#include <algorithm>\n\n/* ===================================== */\nbool iroperation_is_assignment(IROperation& op){\n    return  op == IROperation::ADD || \n            op == IROperation::SUB || \n            op == IROperation::MUL ||\n            op == IROperation::MULH ||\n            op == IROperation::SMULL ||\n            op == IROperation::SMULH || \n            op == IROperation::DIV || \n            op == IROperation::SDIV ||\n            op == IROperation::SHL ||\n            op == IROperation::SHR ||  \n            op == IROperation::NEG || \n            op == IROperation::AND || \n            op == IROperation::OR || \n            op == IROperation::XOR ||\n            op == IROperation::NOT ||\n            op == IROperation::MOD ||\n            op == IROperation::SMOD ||\n            op == IROperation::MOV ||\n            op == IROperation::CONCAT;\n}\nbool iroperation_is_memory(IROperation& op){\n    return  op == IROperation::STM ||\n            op == IROperation::LDM ;\n}\nostream& operator<<(ostream& os, IROperation& op){\n    switch(op){\n        case IROperation::ADD: os << \"ADD\"; break;\n        case IROperation::SUB: os << \"SUB\"; break;\n        case IROperation::MUL: os << \"MUL\"; break;\n        case IROperation::MULH: os << \"MUL(h)\"; break;\n        case IROperation::SMULL: os << \"SMUL(l)\"; break;\n        case IROperation::SMULH: os << \"SMUL(h)\"; break;\n        case IROperation::DIV: os << \"DIV\"; break;\n        case IROperation::SDIV: os << \"SDIV\"; break;\n        case IROperation::SHL: os << \"SHL\"; break;\n        case IROperation::SHR: os << \"SHR\"; break;\n        case IROperation::NEG: os << \"NEG\"; break;\n        case IROperation::AND: os << \"AND\"; break;\n        case IROperation::OR: os << \"OR\"; break;\n        case IROperation::XOR: os << \"XOR\"; break;\n        case 
IROperation::NOT: os << \"NOT\"; break;\n        case IROperation::MOV: os << \"MOV\"; break;\n        case IROperation::MOD: os << \"MOD\"; break;\n        case IROperation::SMOD: os << \"SMOD\"; break;\n        case IROperation::STM: os << \"STM\"; break;\n        case IROperation::LDM: os << \"LDM\"; break;\n        case IROperation::BCC: os << \"BCC\"; break;\n        case IROperation::JCC: os << \"JCC\"; break;\n        case IROperation::BISZ: os << \"BISZ\"; break;\n        case IROperation::CONCAT: os << \"CONCAT\"; break;\n        case IROperation::INT: os << \"INT\"; break;\n        case IROperation::SYSCALL: os << \"SYSCALL\"; break;\n        default: os << \"???\"; break;\n    }\n    return os;\n}\n\n/* ===================================== */\nIROperand::IROperand(): type(IROperandType::NONE), _val(0), high(0), low(0), size(0){}\nIROperand::IROperand(IROperandType t, cst_t cst, exprsize_t h, exprsize_t l): \n    type(t), _val(cst_sign_extend(sizeof(cst_t)*8, cst)), high(h), low(l), size(h-l+1){}\n\nbool IROperand::is_cst(){ return type == IROperandType::CST; }\nbool IROperand::is_var(){ return type == IROperandType::VAR; }\nbool IROperand::is_tmp(){ return type == IROperandType::TMP; }\nbool IROperand::is_none(){ return type == IROperandType::NONE; }\n\ncst_t IROperand::cst(){ return _val; }\nIRVar IROperand::var(){ return (IRVar)_val;}\nIRVar IROperand::tmp(){return (IRVar)_val;}\n\nostream& operator<<(ostream& os, IROperand& op){\n    switch(op.type){\n        case IROperandType::CST: os << op.cst(); break;\n        case IROperandType::TMP: os << \"TMP_\" << op.tmp(); break;\n        case IROperandType::VAR: os << \"VAR_\" << op.var(); break;\n        case IROperandType::NONE: os << \"_\" ; break;\n    }\n    os << \"[\" << op.high << \":\" << op.low << \"]\";\n    return os;\n}\n\n/* Helpers to create operands */\nIROperand ir_cst(cst_t val, exprsize_t high, exprsize_t low){\n    return IROperand(IROperandType::CST, val, high, low);\n}\nIROperand 
ir_var(cst_t num, exprsize_t high, exprsize_t low){\n    return IROperand(IROperandType::VAR, num, high, low);\n}\nIROperand ir_tmp(cst_t num, exprsize_t high, exprsize_t low){\n    return IROperand(IROperandType::TMP, num, high, low);\n}\nIROperand ir_none(){\n    return IROperand();\n}\n\n/* ===================================== */\nIRInstruction::IRInstruction(IROperation _op, IROperand _dst, IROperand _src1, addr_t a){\n    op = _op;\n    dst = _dst;\n    src1 = _src1;\n    src2 = IROperand();\n    addr = a;\n}\nIRInstruction::IRInstruction(IROperation _op, IROperand _dst, IROperand _src1, IROperand _src2, addr_t a){\n    op = _op;\n    dst = _dst;\n    src1 = _src1;\n    src2 = _src2;\n    addr = a;\n}\n\nbool IRInstruction::reads_var(IRVar var){\n    if( iroperation_is_assignment(op)){\n        return  (src1.is_var() && src1.var() == var) || \n                (src2.is_var() && src2.var() == var);\n    }else if( iroperation_is_memory(op)){\n        return  (dst.is_var() && dst.var() == var) ||\n                (src1.is_var() && src1.var() == var) || \n                (src2.is_var() && src2.var() == var);\n    }else if( op == IROperation::BCC || op == IROperation::JCC){\n        return (dst.is_var() && dst.var() == var) ||\n                (src1.is_var() && src1.var() == var) || \n                (src2.is_var() && src2.var() == var);\n    }else if( op == IROperation::BISZ ){\n        return src1.is_var() && src1.var() == var;\n    }else{\n        throw runtime_exception(\"IRInstruction::reads_var() got unknown IROperation\");\n    }\n}\n\nbool IRInstruction::writes_var(IRVar var){\n    if( iroperation_is_assignment(op)){\n        return  (dst.is_var() && dst.var() == var);\n    }else if( iroperation_is_memory(op)){\n        return  false;\n    }else if( op == IROperation::BCC || op == IROperation::JCC){\n        return false;\n    }else if( op == IROperation::BISZ){\n        return (dst.is_var() && dst.var() == var);\n    }else{\n        throw 
runtime_exception(\"IRInstruction::writes_var() got unknown IROperation\");\n    }\n}\n\nbool IRInstruction::uses_var(IRVar var){\n    return reads_var(var) || writes_var(var);\n}\n\nbool IRInstruction::reads_tmp(IRVar tmp){\n    if( iroperation_is_assignment(op)){\n        return  (src1.is_tmp() && src1.tmp() == tmp) || \n                (src2.is_tmp() && src2.tmp() == tmp);\n    }else if( iroperation_is_memory(op)){\n        return  (dst.is_tmp() && dst.tmp() == tmp) ||\n                (src1.is_tmp() && src1.tmp() == tmp) || \n                (src2.is_tmp() && src2.tmp() == tmp);\n    }else if( op == IROperation::BCC || op == IROperation::JCC){\n        return (dst.is_tmp() && dst.tmp() == tmp) ||\n                (src1.is_tmp() && src1.tmp() == tmp) || \n                (src2.is_tmp() && src2.tmp() == tmp);\n    }else if( op == IROperation::BISZ ){\n        return src1.is_tmp() && src1.tmp() == tmp;\n    }else{\n        throw runtime_exception(\"IRInstruction::reads_tmp() got unknown IROperation\");\n    }\n}\n\nbool IRInstruction::writes_tmp(IRVar tmp){\n    if( iroperation_is_assignment(op)){\n        return  (dst.is_tmp() && dst.tmp() == tmp);\n    }else if( iroperation_is_memory(op)){\n        return  false;\n    }else if( op == IROperation::BCC || op == IROperation::JCC){\n        return false;\n    }else if( op == IROperation::BISZ){\n        return (dst.is_tmp() && dst.tmp() == tmp);\n    }else{\n        throw runtime_exception(\"IRInstruction::writes_tmp() got unknown IROperation\");\n    }\n}\n\nvector<IROperand> IRInstruction::used_vars_read(){\n    vector<IROperand> res;\n    if( iroperation_is_assignment(op)){\n        if(src1.is_var())\n            res.push_back(src1);\n        if( src2.is_var())\n            res.push_back(src2);\n    }else if( iroperation_is_memory(op) || op == IROperation::BCC || op == IROperation::JCC ){\n        if(src1.is_var())\n            res.push_back(src1);\n        if( src2.is_var())\n            res.push_back(src2);\n   
     if( dst.is_var() )\n            res.push_back(dst);\n    }else if( op == IROperation::BISZ ){\n        if(src1.is_var())\n            res.push_back(src1);\n    }else if( op == IROperation::INT || op == IROperation::SYSCALL){\n        // Ignore\n    }else{\n        throw runtime_exception(\"IRInstruction::used_vars_read() got unknown IROperation\");\n    }\n    return res;\n}\n\nvector<IROperand> IRInstruction::used_vars_write(){\n    vector<IROperand> res;\n    if( iroperation_is_assignment(op) || op == IROperation::LDM || op == IROperation::BISZ){\n        if(dst.is_var())\n            res.push_back(dst);\n    }else if( op == IROperation::STM || op == IROperation::BCC || op == IROperation::JCC || op == IROperation::INT || op == IROperation::SYSCALL ){\n        // Ignore those even if they rewrite pc\n    }else{\n        throw runtime_exception(\"IRInstruction::used_vars_write() got unknown IROperation\");\n    }\n    return res;\n}\n\nvector<IROperand> IRInstruction::used_tmps_read(){\n    vector<IROperand> res;\n    if( iroperation_is_assignment(op)){\n        if(src1.is_tmp())\n            res.push_back(src1);\n        if( src2.is_tmp())\n            res.push_back(src2);\n    }else if( iroperation_is_memory(op) || op == IROperation::BCC || op == IROperation::JCC || op == IROperation::INT ){\n        if(src1.is_tmp())\n            res.push_back(src1);\n        if( src2.is_tmp())\n            res.push_back(src2);\n        if( dst.is_tmp() )\n            res.push_back(dst);\n    }else if( op == IROperation::BISZ ){\n        if(src1.is_tmp())\n            res.push_back(src1);\n    }else if( op == IROperation::SYSCALL ){\n        if( src1.is_tmp() )\n            res.push_back(src1);\n    }else{\n        throw runtime_exception(\"IRInstruction::used_tmps_read() got unknown IROperation\");\n    }\n    return res;\n}\n\nvector<IROperand> IRInstruction::used_tmps_write(){\n    vector<IROperand> res;\n    if( iroperation_is_assignment(op)){\n        
if(dst.is_tmp())\n            res.push_back(dst);\n    }else if( iroperation_is_memory(op) || op == IROperation::BCC || op == IROperation::JCC \n              || op == IROperation::INT || op == IROperation::SYSCALL){\n        // Ignore \n    }else if( op == IROperation::BISZ ){\n        if(dst.is_tmp())\n            res.push_back(dst);\n    }else{\n        throw runtime_exception(\"IRInstruction::used_tmps_write() got unknown IROperation\");\n    }\n    return res;\n}\n\nostream& operator<<(ostream& os, IRInstruction& ins){\n    os << \"(0x\" << std::hex << ins.addr << \")\";\n    os << \"\\t\" << ins.op << \"\\t\";\n    if( ins.op == IROperation::BCC ){\n        os << ins.dst << \",\\tbblk_\" << ins.src1.cst();\n        if( !ins.src2.is_none()){\n            os << \",\\t\\tbblk_\" << ins.src2.cst();\n        }\n    }else{\n        os << ins.dst << \",\\t\" << ins.src1;\n        if( !ins.src2.is_none()){\n            os << \",\\t\" << ins.src2;\n        }\n    }\n    os << std::endl;\n    return os;\n}\n\n/* Helpers to create instructions */\nIRInstruction ir_add(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::ADD, dst, src1, src2, addr);\n}\nIRInstruction ir_sub(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::SUB, dst, src1, src2, addr);\n}\nIRInstruction ir_mul(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::MUL, dst, src1, src2, addr);\n}\nIRInstruction ir_mulh(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::MULH, dst, src1, src2, addr);\n}\nIRInstruction ir_smull(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::SMULL, dst, src1, src2, addr);\n}\nIRInstruction ir_smulh(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::SMULH, dst, src1, src2, 
addr);\n}\nIRInstruction ir_div(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::DIV, dst, src1, src2, addr);\n}\nIRInstruction ir_sdiv(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::SDIV, dst, src1, src2, addr);\n}\nIRInstruction ir_and(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::AND, dst, src1, src2, addr);\n}\nIRInstruction ir_or(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::OR, dst, src1, src2, addr);\n}\nIRInstruction ir_xor(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::XOR, dst, src1, src2, addr);\n}\nIRInstruction ir_shl(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::SHL, dst, src1, src2, addr);\n}\nIRInstruction ir_shr(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::SHR, dst, src1, src2, addr);\n}\nIRInstruction ir_mod(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::MOD, dst, src1, src2, addr);\n}\nIRInstruction ir_smod(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::SMOD, dst, src1, src2, addr);\n}\nIRInstruction ir_neg(IROperand dst, IROperand src1, addr_t addr){\n    return IRInstruction(IROperation::NEG, dst, src1, addr);\n}\nIRInstruction ir_not(IROperand dst, IROperand src1, addr_t addr){\n    return IRInstruction(IROperation::NOT, dst, src1, addr);\n}\nIRInstruction ir_ldm(IROperand dst, IROperand src1, addr_t addr){\n    return IRInstruction(IROperation::LDM, dst, src1, addr);\n}\nIRInstruction ir_stm(IROperand dst, IROperand src1, addr_t addr){\n    return IRInstruction(IROperation::STM, dst, src1, addr);\n}\nIRInstruction ir_mov(IROperand dst, IROperand src1, addr_t addr){\n    
return IRInstruction(IROperation::MOV, dst, src1, addr);\n}\nIRInstruction ir_bcc(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::BCC, dst, src1, src2, addr);\n}\nIRInstruction ir_jcc(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::JCC, dst, src1, src2, addr);\n}\nIRInstruction ir_bisz(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::BISZ, dst, src1, src2, addr);\n}\nIRInstruction ir_concat(IROperand dst, IROperand src1, IROperand src2, addr_t addr){\n    return IRInstruction(IROperation::CONCAT, dst, src1, src2, addr);\n}\nIRInstruction ir_int(IROperand num, IROperand ret, addr_t addr){\n    return IRInstruction(IROperation::INT, num, ret, addr);\n}\nIRInstruction ir_syscall(IROperand type, IROperand ret, addr_t addr){\n    return IRInstruction(IROperation::SYSCALL, type, ret, addr);\n}\n\n/* ====================================== */\nIRContext::IRContext():_var(nullptr), _nb_var(0){}\nIRContext::IRContext(IRVar nb_var){\n    _var = new Expr[nb_var]{nullptr};\n    _nb_var = nb_var;\n}\nIRContext::~IRContext(){\n    delete [] _var; _var = nullptr;\n}\nint IRContext::nb_vars(){\n    return _nb_var;\n}\nvoid IRContext::set(IRVar num, Expr e){\n    if( num >= _nb_var ){\n        throw ir_exception(\"IRContext::set(): Invalid register argument\");\n    }\n    _var[num] = e;\n}\nExpr IRContext::get(IRVar num){\n    if( num >= _nb_var ){\n        throw ir_exception(\"IRContext::get(): Invalid register argument\");\n    }\n    return _var[num];\n}\n\nostream& operator<<(ostream& os, IRContext& ctx){\n    for( int i = 0; i < ctx.nb_vars(); i++){\n        os << \"Var_\" << i << \" : \" << ctx.get(i) << std::endl;\n    }\n    return os;\n}\n/* ====================================== */\n\nMemContext::MemContext(){\n    simp = NewDefaultExprSimplifier();\n}\n\nvoid MemContext::write(Expr addr, Expr expr){\n    
unordered_map<Expr, Expr>::iterator it;\n    addr = simp->simplify(addr); // Simplify address to check for collisions\n    if( (it = writes.find(addr)) != writes.end()){\n        if( it->second->size <= expr->size ){\n            writes[addr] = expr;\n        }else{\n            writes[addr] = concat(extract(it->second, it->second->size-1, expr->size), expr);\n        }\n    }else{\n        writes[addr] = expr;\n    }\n}\n\nExpr MemContext::read(Expr addr, int octets){\n    unordered_map<Expr, Expr>::iterator it;\n    addr = simp->simplify(addr);\n    if( (it = writes.find(addr)) != writes.end()){\n        if( it->second->size/8 == octets ){\n            return it->second;\n        }else if( it->second->size/8 > octets ){\n            return extract(it->second, (octets*8)-1, 0);\n        }else{\n            return concat(exprmem(octets*8 - it->second->size, addr + it->second->size), it->second);\n        }\n    }else{\n        return exprmem(octets*8, addr);\n    }\n}\n\nMemContext::~MemContext(){\n    delete simp; simp = nullptr;\n}\n\nostream& operator<<(ostream& os, MemContext& ctx){\n    for( auto w : ctx.writes ){\n        os << \"MEM[\" << w.first << \"] : \" << w.second << std::endl;\n    }\n    return os;\n}\n\n/* ====================================== */\n\nusing std::max;\nusing std::min;\n\nIRBlock::IRBlock(string n, addr_t start, addr_t end): name(n), ir_size(0), \n            raw_size(0), start_addr(start), end_addr(end), _nb_tmp_vars(0),\n            _nb_instr(0), _nb_instr_ir(0), known_max_sp_inc(false), ends_with_int80(false),\n            ends_with_syscall(false){\n    branch_target[0] = 0;\n    branch_target[1] = 0;\n    memset(dereferenced_regs, false, sizeof(dereferenced_regs)); \n}\n\nvoid IRBlock::add_instr(IRBasicBlockId bblock, IRInstruction instr){\n    assert(_bblocks.size() > bblock && \"Adding instruction to basic block that doesn't exist\" );\n    _bblocks[bblock].push_back(instr);\n    ir_size++;\n}\n\nIRBasicBlockId 
IRBlock::new_bblock(){\n    _bblocks.push_back(IRBasicBlock());\n    return (IRBasicBlockId)(_bblocks.size()-1);\n}\n\nIRBasicBlock& IRBlock::get_bblock(IRBasicBlockId id){\n    return _bblocks[id];\n}\n\nint IRBlock::nb_bblocks(){\n    return _bblocks.size();\n}\n\nvector<IRBasicBlock>& IRBlock::bblocks(){\n    return _bblocks;\n};\n\nostream& operator<<(ostream& os, IRBlock& blk){\n    IRBasicBlock::iterator it;\n    os << std::endl << blk.name;\n    for( int i = 0; i < blk.bblocks().size(); i++){\n        os << \"\\n\\tbblk_\" << i << \":\" << std::endl;\n        for( it = blk.bblocks()[i].begin(); it != blk.bblocks()[i].end(); it++){\n            os << \"\\t\" << *it;\n        }\n    }\n    return os;\n}\n"
  },
  {
    "path": "libropium/ropchain/assertion.cpp",
    "content": "#include \"assertion.hpp\"\n#include <algorithm>\n\n\nvoid ValidPointers::add_valid_pointer(int reg){\n    _regs.push_back(reg);\n}\n\nbool ValidPointers::is_valid_pointer(int reg){\n    return std::find(_regs.begin(), _regs.end(), reg) != _regs.end();\n}\n\nvoid ValidPointers::clear(){\n    _regs.clear();\n}\n\n\n\nvoid Assertion::clear(){\n    valid_pointers.clear();\n}\n"
  },
  {
    "path": "libropium/ropchain/constraint.cpp",
    "content": "#include \"constraint.hpp\"\n#include <cstring>\n#include <iostream>\n\n/* =============== Bad Bytes ================= */\n\nvoid BadBytes::add_bad_byte(unsigned char byte){\n    _bad_bytes.push_back(byte);\n}\n\nvoid BadBytes::clear(){\n    _bad_bytes.clear();\n}\n\nbool BadBytes::is_valid_byte(unsigned char byte){\n    return (std::find(_bad_bytes.begin(), _bad_bytes.end(), byte) == _bad_bytes.end());\n}\n\nunsigned char BadBytes::get_valid_byte(){\n    for( unsigned char byte = 0xff; byte >= 0; byte--){\n        if( is_valid_byte(byte) ){\n            return byte;\n        }\n    }\n    throw runtime_exception(\"BadBytes::get_valid_byte(): all bytes are invalid!\");\n}\n\naddr_t BadBytes::get_valid_padding(int nb_bytes){\n    unsigned char byte = get_valid_byte();\n    addr_t res = 0;\n    for(; nb_bytes > 0; nb_bytes--){\n        res = (res<<8) + byte;\n    }\n    return res;\n}\n\nbool BadBytes::is_valid_address(addr_t addr, int arch_bytes){\n    for( int i = 0; i < arch_bytes; i++){\n        if( ! 
is_valid_byte(addr & 0xff)){\n            return false;\n        }\n        addr >>= 8;\n    }\n    return true;\n}\n\naddr_t BadBytes::get_valid_address(Gadget* gadget, int arch_bytes){\n    for( addr_t addr : gadget->addresses ){\n        if( is_valid_address(addr, arch_bytes) ){\n            return addr;\n        }\n    }\n    throw runtime_exception(\"BadBytes::get_valid_address(): all addresses are invalid!\");\n}\n\nbool BadBytes::check(Gadget* gadget, int arch_bytes){\n    for( addr_t addr : gadget->addresses ){\n        if( is_valid_address(addr, arch_bytes) ){\n            return true;\n        }\n    }\n    return false;\n}\n\n/* ================ Keep Regs ================= */\n\nvoid KeepRegs::add_keep_reg(int reg_num){\n    _keep.push_back(reg_num);\n}\n\nvoid KeepRegs::clear(){\n    _keep.clear();\n}\n\nvector<int>& KeepRegs::regs_to_keep(){\n    return _keep;\n}\n\nbool KeepRegs::is_kept(int reg_num){\n    return (std::find(_keep.begin(), _keep.end(), reg_num) != _keep.end());\n}\n\nbool KeepRegs::check(Gadget* gadget){\n    for( int reg : _keep ){\n        if( gadget->modified_regs[reg])\n            return false;\n    }\n    return true;\n}\n\n\n/* ================ Memory Safety ================= */\n\nMemSafety::MemSafety(){\n    _force_safe = true; // Enforce pointer safety by default\n    memset(_safe_reg_pointers, false, sizeof(_safe_reg_pointers));\n}\nvoid MemSafety::force_safe(){ _force_safe = true; }\nvoid MemSafety::enable_unsafe(){ _force_safe = false; }\n\nvoid MemSafety::add_safe_reg(int reg_num){\n    _safe_reg_pointers[reg_num] = true;\n}\n\nbool MemSafety::is_enforced(){\n    return _force_safe;\n}\n\nvoid MemSafety::clear(){\n    _force_safe = true;\n    memset(_safe_reg_pointers, false, sizeof(_safe_reg_pointers));\n}\n\nbool MemSafety::check(Gadget* gadget, int arch_nb_regs, Assertion* assertion){\n    if( !_force_safe)\n        return true;\n    for( int i = 0; i < arch_nb_regs; i++){\n        if( gadget->dereferenced_regs[i] ){ 
\n            if( !_safe_reg_pointers[i] &&\n            (assertion == nullptr || !assertion->valid_pointers.is_valid_pointer(i)))\n                return false;\n        }\n    }\n    return true;\n}\n\n/* =============== Full Constraint =================== */\n\nvoid Constraint::clear(){\n    bad_bytes.clear();\n    keep_regs.clear();\n    mem_safety.clear();\n}\n\nbool Constraint::check(Gadget* gadget, Arch* arch, Assertion* assertion){\n    return  bad_bytes.check(gadget, arch->octets) &&\n            keep_regs.check(gadget) &&\n            mem_safety.check(gadget, arch->nb_regs, assertion);\n}\n"
  },
  {
    "path": "libropium/ropchain/gadget.cpp",
    "content": "#include \"ropchain.hpp\"\n#include <unordered_map>\n#include <iostream>\n#include <cstring>\n\nusing std::unordered_map;\n\nGadget::Gadget():semantics(nullptr), bin_num(-1), branch_type(BranchType::NONE){\n    memset(modified_regs, 0, sizeof(modified_regs));\n}\n\nGadget::~Gadget(){\n    delete semantics;\n    semantics = nullptr;\n}\n\nvoid Gadget::add_address(addr_t addr){\n    addresses.push_back(addr);\n}\n\nvoid Gadget::print(ostream& os){\n    os << \"Gadget: \" << asm_str << std::endl;\n    for( int i = 0; i < semantics->regs->nb_vars(); i++){\n        if( modified_regs[i] )\n            os << \"Reg_\" << i << \" : \" << semantics->regs->get(i) << std::endl;\n    }\n    os << *(semantics->mem) << std::endl;\n}\n\nostream& operator<<(ostream& os, Gadget& g){\n    g.print(os);\n    return os;\n}\n\nbool Gadget::lthan(Gadget& other){\n    if( nb_instr != other.nb_instr ){\n        return nb_instr < other.nb_instr;\n    }else{\n        return nb_instr_ir < other.nb_instr_ir; \n    }\n}\n"
  },
  {
    "path": "libropium/ropchain/ropchain.cpp",
    "content": "#include \"ropchain.hpp\"\n#include \"utils.hpp\"\n#include <iostream>\n#include <sstream>\n\n\nROPChain::ROPChain(Arch* a):arch(a){}\n\nvoid ROPChain::add_gadget(addr_t addr, Gadget* gadget){\n    items.push_back(ROPItem(addr, gadget));\n}\n\nvoid ROPChain::add_padding(cst_t value, string msg){\n    items.push_back(ROPItem(ROPItemType::PADDING, value, msg));\n}\n\nvoid ROPChain::add_gadget_address(cst_t value, string msg){\n    items.push_back(ROPItem(ROPItemType::GADGET_ADDRESS, value, msg));\n}\n\nvoid ROPChain::add_chain(ROPChain& other){\n    for( ROPItem& item : other.items ){\n        items.push_back(item);\n    }\n}\n\nint ROPChain::len(){\n    return items.size();\n}\n\nvoid ROPChain::print_pretty(ostream& os, string tab){\n    for(ROPItem& item : items){\n        if( item.type == ROPItemType::GADGET ){\n            os << tab << str_special(value_to_hex_str(arch->octets, item.addr)) << \" (\" << str_bold(item.gadget->asm_str) << \")\" << std::endl;\n        }else if( item.type == ROPItemType::PADDING ){\n            os << tab << str_special(value_to_hex_str(arch->octets, item.value));\n            if( !item.msg.empty() )\n                os << \" (\" << str_bold(item.msg) << \")\";\n            os << std::endl;\n        }else if( item.type == ROPItemType::GADGET_ADDRESS ){\n            os << tab << str_special(value_to_hex_str(arch->octets, item.value));\n            if( !item.msg.empty() )\n                os << \" (\" << str_bold(item.msg) << \")\";\n            os << std::endl;\n        }else{\n            os << tab << \"Unsupported \" << std::endl;\n        }\n    }\n}\n\nvoid ROPChain::print_python(ostream& os, string tab){\n    string pack, endian, p; \n    addr_t gadgets_offset = 0;\n    // Set packing strings, endianness, etc\n    p = \"p\"; \n    if( arch->octets == 4 )\n        endian = \"'<I'\";\n    else if( arch->octets == 8 )\n        endian = \"'<Q'\";\n    else\n        throw runtime_exception(\"ROPChain::print_python(): 
Doesn't support printing for non 4 or 8 octets address size\");\n    pack = p + \" += pack(\" + endian + \", \"; \n    // Init chain\n    os << tab << \"from struct import pack\" ;\n    os << \"\\n\" << tab << \"off = 0x\" << std::hex << gadgets_offset;\n    os << \"\\n\" << tab << p << \" = ''\" << std::endl; \n\n    for(ROPItem& item : items){\n        if( item.type == ROPItemType::GADGET ){\n            os << tab << pack << str_special(value_to_hex_str(arch->octets, item.addr)) << \" + off) # \" << str_bold(item.gadget->asm_str) << std::endl;\n        }else if( item.type == ROPItemType::PADDING ){\n            os << tab << pack << str_special(value_to_hex_str(arch->octets, item.value)) << \")\";\n            if( !item.msg.empty())\n                os << \" # \" << str_bold(item.msg);\n            os << std::endl;\n        }else if( item.type == ROPItemType::GADGET_ADDRESS ){\n            os << tab << pack << str_special(value_to_hex_str(arch->octets, item.value)) << \" + off)\";\n            if( !item.msg.empty())\n                os << \" # \" << str_bold(item.msg);\n            os << std::endl;\n        }else{\n            os << tab << \"[Unsupported item]\" << std::endl;\n        }\n    }\n}\n\nvoid append_value_to_bytes(vector<uint8_t>& bytes, addr_t val, int nb_octets){\n    // Assume little endian\n    for( int i = 0; i < nb_octets; i++){\n        bytes.push_back((uint8_t)(val & 0xff));\n        val >>= 8;\n    }\n}\n\nvoid ROPChain::dump_raw(vector<uint8_t>& bytes){\n    for(ROPItem& item : items){\n        if( item.type == ROPItemType::GADGET ){\n            append_value_to_bytes(bytes, item.addr, arch->octets);\n        }else if( item.type == ROPItemType::PADDING ){\n            append_value_to_bytes(bytes, item.value, arch->octets);\n        }else if( item.type == ROPItemType::GADGET_ADDRESS ){\n            append_value_to_bytes(bytes, item.value, arch->octets);\n        }else{\n            throw runtime_exception(\"ROPChain::print_raw() got 
unsupported item type\");\n        }\n    }\n}\n\nostream& operator<<(ostream& os, ROPChain& ropchain){\n    ropchain.print_pretty(os);\n    return os;\n}\n"
  },
  {
    "path": "libropium/symbolic/expression.cpp",
    "content": "#include \"expression.hpp\"\n#include \"exception.hpp\"\n#include <cassert>\n#include <cstring>\n#include \"murmur3.h\"\n#include <algorithm>\n#include <iostream>\n#include <sstream>\n\nusing std::make_shared;\nusing std::stringstream;\n\n/* Expression hashes \n\nIn order to enabe quick equality checks between expressions, each\nexpression has a 32-bit hash that 'uniquely' identifies it (colisions\nare estimated unlikely enough to be ignored).  \n\nThe hash is not computed at expression creation. Some benchmarks seemed\nto indicate that it was increasing the creation time by about 80%. For\nthis reason, hashes are computed dynamically when needed. \n\nThe current implementation uses the murmur3 hash function C implementation\navailable on https://github.com/PeterScott/murmur3.  \n\nHash computation:\nSeveral util functions named \"prepare_hash_with_<type>\" enable to add data\nto the input buffer, and the exprhash() function computes the hash of the\nbuffer contents.\n\n*/ \n\n#define MAXLEN_HASH_IN 1024\n\n/* Set of functions to add a value to be hashed in the hash input buffer\n * 'hash_in' and returns the number of bytes added */ \ninline int prepare_hash_with_i64(uint8_t* hash_in, int64_t val, int index=0){\n     *(int64_t*)(hash_in+index) = val;\n     return index + 8; \n}\n\ninline int prepare_hash_with_str(uint8_t* hash_in, const string& str, int index=0){\n    strncpy((char*)hash_in+index, str.data(), str.length());\n    return index + str.length();\n}\n\ninline int prepare_hash_with_i32(uint8_t* hash_in, int32_t val, int index=0){\n    *(int32_t*)(hash_in+index) = val;\n     return (index + 4); \n}\n\ninline int prepare_hash_with_op(uint8_t* hash_in, Op op, int index=0){\n    *((uint8_t*)((char*)hash_in+index)) = static_cast<uint8_t>(op);\n    return index + 1; \n}\n/* Hash the currently prepared buffer */ \nhash_t exprhash(void* hash_in, int len, uint32_t seed){\n    unsigned char hash_out[4];\n    MurmurHash3_x86_32(hash_in, len, seed, 
hash_out);\n    return *((hash_t*)hash_out);\n}\n\n/* Implementation of expression classes */ \n// ==================================\nExprObject::ExprObject(ExprType t, exprsize_t s, bool _is_simp): type(t), size(s), _hashed(false), _hash(0), \n    _simplified_expr(nullptr), _is_simplified(_is_simp), _concrete_ctx_id(-1){}\nbool ExprObject::is_cst(){return type == ExprType::CST;}\nbool ExprObject::is_var(){return type == ExprType::VAR;}\nbool ExprObject::is_mem(){return type == ExprType::MEM;}\nbool ExprObject::is_unop(Op op){return false;}\nbool ExprObject::is_binop(Op op){return false;}\nbool ExprObject::is_extract(){return type == ExprType::EXTRACT;}\nbool ExprObject::is_concat(){return type == ExprType::CONCAT;}\nbool ExprObject::is_bisz(){return type == ExprType::BISZ;}\nbool ExprObject::is_unknown(){return type == ExprType::UNKNOWN;}\ncst_t ExprObject::concretize(VarContext* ctx ){throw runtime_exception(\"Can not concretize base class ExprObject\");}\nbool ExprObject::eq(Expr other){return hash() == other->hash();}\nbool ExprObject::neq(Expr other){return hash() != other->hash();}\nbool ExprObject::inf(Expr e2){\n    if( type != e2->type ){\n        return type < e2->type;\n    }else{\n        switch(type){\n            case ExprType::CST: return cst() < e2->cst();\n            case ExprType::VAR: return name().compare(e2->name()) > 0;\n            case ExprType::MEM: return args[0] < e2->args[0];\n            case ExprType::UNOP:\n                return( op() < e2->op() || \n                        args[0]->inf(e2->args[0]));\n            case ExprType::BINOP:\n                if( op() == e2->op() ){\n                    if( args[0]->eq(e2->args[0]) )\n                        return args[1]->inf(e2->args[1]);\n                    else\n                        return args[0]->inf(e2->args[0]); \n                }else\n                    return op() < e2->op(); \n            case ExprType::EXTRACT:\n            case ExprType::CONCAT:\n                for( 
int i = 0; i < (this->is_extract()?3:2); i++){\n                    if( args[i]->eq(e2->args[i]) )\n                        continue;\n                    return args[i]->inf(e2->args[i]); \n                }\n                return false;\n            case ExprType::BISZ:\n                return args[0]->inf(e2->args[0]);\n            case ExprType::UNKNOWN:\n                return false;\n            default:\n                throw runtime_exception(\"ExprObject::inf() got unsupported ExprType\");\n        }\n    }\n}\n\nvoid ExprObject::replace_var_name(string& curr_name, string& new_name){\n    switch(type){\n        case ExprType::CST: return;\n        case ExprType::VAR: \n            if (name() == curr_name)\n                replace_name(new_name);\n            return;\n        case ExprType::MEM: \n        case ExprType::UNOP:\n        case ExprType::BISZ:\n            return args[0]->replace_var_name(curr_name, new_name);\n        case ExprType::BINOP:\n        case ExprType::EXTRACT:\n        case ExprType::CONCAT:\n            args[0]->replace_var_name(curr_name, new_name);\n            args[1]->replace_var_name(curr_name, new_name);\n            return;\n        case ExprType::UNKNOWN:\n            return;\n        default:\n            throw runtime_exception(\"ExprObject::replace_var_name() got unsupported ExprType\");\n    }\n}\n\n// ==================================\nExprCst::ExprCst(exprsize_t s, cst_t c): ExprObject(ExprType::CST, s, true){\n    _cst = cst_sign_extend(s, c);\n    if( s > 64 ){\n        throw expression_exception(QuickFmt() << \"Cannot create constant expression of size > 64 (got \"\n            << std::dec << s << \")\" >> QuickFmt::to_str);\n    }\n}\nhash_t ExprCst::hash(){\n    unsigned char hash_in[MAXLEN_HASH_IN]; \n    if( !_hashed ){\n        _hash = exprhash(hash_in, prepare_hash_with_i64(hash_in, _cst), size);\n        _hashed = true;\n    }\n    return _hash;\n}\ncst_t ExprCst::cst(){ return _cst; }\ncst_t 
ExprCst::concretize(VarContext* ctx){return _cst;}\nvoid ExprCst::print(ostream& os){os << std::showbase << cst_sign_trunc(size, _cst) << std::noshowbase;}\nExpr ExprCst::copy(){\n    return exprcst(size, _cst);\n}\n// ==================================\nExprVar::ExprVar(exprsize_t s, string n, int num): ExprObject(ExprType::VAR, s, true), _name(n), _num(num){\n    if( s > 64 ){\n        throw expression_exception(QuickFmt() << \"Cannot create symbolic variables of size > 64 (got \"\n            << std::dec << s << \")\" >> QuickFmt::to_str);\n    }\n    assert( n.size() <= MAXLEN_HASH_IN );\n}\nhash_t ExprVar::hash(){\n    unsigned char hash_in[MAXLEN_HASH_IN]; \n    if( !_hashed ){\n        _hash = exprhash(hash_in, prepare_hash_with_str(hash_in, _name),size); \n        _hashed = true;\n    }\n    return _hash;\n}\n\nbool ExprVar::is_reg(int reg){\n    return _num == reg;\n}\nint ExprVar::reg(){ return _num;}\ncst_t ExprVar::concretize(VarContext* ctx){\n    if( ctx == nullptr){\n        throw expression_exception(\"Cannot concretize symbolic variable without supplying a context\");\n    }\n    if( _concrete_ctx_id == ctx->id )\n        return _concrete;\n    else{\n        _concrete = ctx->get(_name);\n        _concrete_ctx_id = ctx->id; \n    }\n    return _concrete; \n}\nconst string& ExprVar::name(){ return _name; } \nvoid ExprVar::replace_name(string& new_name){ _name = new_name; }\nvoid ExprVar::print(ostream& os){os << _name;}\nExpr ExprVar::copy(){\n    return exprvar(size, _name);\n}\n\n// ==================================\nExprMem::ExprMem(exprsize_t s, Expr addr): ExprObject(ExprType::MEM, s, false) {\n    args.push_back(addr);\n}\nhash_t ExprMem::hash(){\n    unsigned char hash_in[MAXLEN_HASH_IN]; \n    if( !_hashed ){\n        _hash = exprhash(hash_in, prepare_hash_with_i32(hash_in, args[0]->hash()), size);\n        _hashed = true;\n    }\n    return _hash; \n}\ncst_t ExprMem::concretize(VarContext* ctx){\n    throw runtime_exception(\"concretize() 
not imlemented for memory expressions!\");\n}\nvoid ExprMem::print(ostream& os){\n    os << \"@\" << std::dec << size << \"[\" << std::hex << args.at(0) << \"]\";\n}\n\nExpr ExprMem::copy(){\n    return exprmem(size, args[0]->copy());\n}\n\n// ==================================\nExprUnop::ExprUnop(Op o, Expr arg): ExprObject(ExprType::UNOP, arg->size), _op(o){\n    args.push_back(arg);\n}\nhash_t ExprUnop::hash(){\n    unsigned char hash_in[MAXLEN_HASH_IN]; \n    if( !_hashed ){\n        _hash = exprhash(hash_in, prepare_hash_with_i32(hash_in, args[0]->hash(), prepare_hash_with_op(hash_in, _op)), size);\n        _hashed = true;\n    }\n    return _hash;\n}\nOp ExprUnop::op(){ return _op;}\nvoid ExprUnop::print(ostream& os){\n    os << op_to_str(_op) << std::hex; \n    args.at(0)->print(os);\n}\n\nbool ExprUnop::is_unop(Op op){\n    if( op == Op::NONE )\n        return true;\n    else\n        return op == _op; \n}\n\ncst_t ExprUnop::concretize(VarContext* ctx){\n    if( ctx != nullptr && _concrete_ctx_id == ctx->id )\n        return _concrete;\n    else{\n        switch(_op){\n            case Op::NEG: _concrete =cst_sign_extend(size, -(args[0]->concretize(ctx))); break;\n            case Op::NOT: _concrete =cst_sign_extend(size, ~(args[0]->concretize(ctx))); break;\n            default: throw runtime_exception(\"Missing case in ExprUnop::concretize()\");\n        }\n        if( ctx != nullptr ){\n            _concrete_ctx_id = ctx->id; \n        }\n        _concrete = cst_sign_extend(size, _concrete);\n    }\n    return _concrete; \n}\n\nExpr ExprUnop::copy(){\n    return exprunop(_op, args[0]->copy());\n}\n\n// ==================================\nExprBinop::ExprBinop(Op o, Expr left, Expr right): ExprObject(ExprType::BINOP, left->size), _op(o){\n    if( left->size != right->size ){\n        throw expression_exception(QuickFmt() << \"Cannot use binary operator on expressions of different sizes (got \" << left->size << \" and \" << right->size << \")\" >> 
QuickFmt::to_str);\n    }\n    args.push_back(left);\n    args.push_back(right);\n}\nhash_t ExprBinop::hash(){\n    unsigned char hash_in[MAXLEN_HASH_IN]; \n    if( !_hashed ){\n        _hash = exprhash(hash_in, prepare_hash_with_i32(hash_in, args[1]->hash(), \n                    prepare_hash_with_op(hash_in, _op,\n                    prepare_hash_with_i32(hash_in, args[0]->hash()))), size);\n        _hashed = true;\n    }\n    return _hash; \n}\nOp ExprBinop::op(){ return _op;}\nvoid ExprBinop::get_associative_args(Op o, vector<Expr>& vec){\n    if( _op == o ){\n        if( args[0]->is_binop() && args[0]->op() == o )\n            args[0]->get_associative_args(o, vec);\n        else\n            vec.push_back(args[0]);\n        if( args[1]->is_binop(o) )\n            args[1]->get_associative_args(o, vec);\n        else\n            vec.push_back(args[1]);\n    }\n    /* No else statement\n     * This function should never be called recursively when the operand\n     * is not equal to the argument 'o'. The reason is that leaf expressions\n     * (i.e that are not from the requested operator) cannot return shared_ptr\n     * to themselves without loosing the type information. 
So all checks are done\n     * by the enclosing binary operations */ \n}\nvoid ExprBinop::get_left_associative_args(Op o, vector<Expr>& vec, Expr& leftmost){\n    if( _op == o ){\n        vec.push_back(args[1]);\n        if( args[0]->is_binop(o))\n            args[0]->get_left_associative_args(o, vec, leftmost );\n        else\n            leftmost = args[0];\n    }else{\n        leftmost = make_shared<ExprObject>(*this);\n    }\n}\n\n\nvoid ExprBinop::print(ostream& os){\n    os << \"(\" << std::hex;\n    args.at(0)->print(os);\n    os << op_to_str(_op) << std::hex;\n    args.at(1)->print(os); \n    os << \")\";\n}\n\nbool ExprBinop::is_binop(Op op){\n    if( op == Op::NONE )\n        return true;\n    else\n        return op == _op; \n}\n\ncst_t ExprBinop::concretize(VarContext* ctx){\n    if( ctx != nullptr && _concrete_ctx_id == ctx->id )\n        return _concrete;\n    else{\n        switch(_op){\n            case Op::ADD: _concrete = (args[0]->concretize(ctx) + args[1]->concretize(ctx)); break;\n            case Op::MUL: _concrete = ((ucst_t)args[0]->concretize(ctx) * (ucst_t)args[1]->concretize(ctx)); break;\n            case Op::MULH: _concrete = (cst_t)(((__uint128_t)cst_sign_trunc(args[0]->size, args[0]->concretize(ctx)) * \n                                                            cst_sign_trunc(args[1]->size, (__uint128_t)args[1]->concretize(ctx))) >> size ); break;\n            case Op::DIV: _concrete = ((ucst_t)cst_sign_trunc(args[0]->size, args[0]->concretize(ctx)) / (ucst_t)cst_sign_trunc(args[1]->size, args[1]->concretize(ctx))); break;\n            case Op::SDIV: _concrete = (args[0]->concretize(ctx) / args[1]->concretize(ctx)); break;\n            case Op::AND: _concrete = (args[0]->concretize(ctx) & args[1]->concretize(ctx)); break;\n            case Op::OR: _concrete = (args[0]->concretize(ctx) | args[1]->concretize(ctx)); break;\n            case Op::XOR: _concrete = (args[0]->concretize(ctx) ^ args[1]->concretize(ctx)); break;\n            
case Op::MOD: _concrete = ((ucst_t)args[0]->concretize(ctx) % (ucst_t)args[1]->concretize(ctx)); break;\n            case Op::SMOD: _concrete = (args[0]->concretize(ctx) % args[1]->concretize(ctx)); break;\n            case Op::SMULL: _concrete = (cst_t)((__int128_t)args[0]->concretize(ctx) * args[1]->concretize(ctx)); break;\n            case Op::SMULH: _concrete = (cst_t)(((__int128_t)args[0]->concretize(ctx) * args[1]->concretize(ctx)) >> size); break;\n            case Op::SHL: \n                if( cst_sign_trunc(args[1]->size, args[1]->concretize(ctx)) >= args[0]->size ){\n                    _concrete = 0;\n                }else{ \n                    _concrete = ((ucst_t)cst_sign_trunc(args[0]->size, args[0]->concretize(ctx))) << ((ucst_t)args[1]->concretize(ctx));\n                }\n                break;\n            case Op::SHR: \n                if( cst_sign_trunc(args[1]->size, args[1]->concretize(ctx)) >= args[0]->size ){\n                    _concrete = 0;\n                }else{\n                    _concrete = ((ucst_t)cst_sign_trunc(args[0]->size, args[0]->concretize(ctx))) >> ((ucst_t)args[1]->concretize(ctx));\n                }\n                break;\n            default: throw runtime_exception(\"Missing case in ExprBinop::concretize()\");\n        }\n        if( ctx != nullptr ){\n            _concrete_ctx_id = ctx->id;\n        }\n        _concrete =cst_sign_extend(size, _concrete);\n    }\n    return _concrete; \n}\n\nExpr ExprBinop::copy(){\n    return exprbinop(_op, args[0]->copy(), args[1]->copy());\n}\n\n// ==================================\nExprExtract::ExprExtract(Expr arg, Expr higher, Expr lower): ExprObject(ExprType::EXTRACT, 0){\n    assert(higher->is_cst() && lower->is_cst() && \n    \"Cannot create extract with bit parameters that are not constant expressions\");\n    if( (ucst_t)higher->cst() < (ucst_t)lower->cst() ){\n        throw expression_exception(\"Can not use Extract() with higher bit smaller than lower bit\");\n    
}\n    if( (ucst_t)higher->cst() >= arg->size ){\n        throw expression_exception(QuickFmt() << \"Can not extract bit \" << higher->cst() << \" from expression of size \" << arg->size >> QuickFmt::to_str );\n    }\n    args.push_back(arg);\n    args.push_back(higher);\n    args.push_back(lower);\n    size = (ucst_t)higher->cst() - (ucst_t)lower->cst() + 1;\n}\nhash_t ExprExtract::hash(){\n    unsigned char hash_in[MAXLEN_HASH_IN];\n    if( !_hashed ){\n        _hash = exprhash(hash_in, prepare_hash_with_i32(hash_in, args[2]->hash(),\n                    prepare_hash_with_i32(hash_in, args[1]->hash(),\n                    prepare_hash_with_i32(hash_in, args[0]->hash()))), size);\n        _hashed = true;\n    }\n    return _hash; \n}\nvoid ExprExtract::print(ostream& os){\n    os << std::hex;\n    args.at(0)->print(os);\n    os << \"[\" << std::dec;\n    args.at(1)->print(os);\n    os << \":\" << std::dec;\n    args.at(2)->print(os);\n    os << \"]\";\n}\n\ncst_t ExprExtract::concretize(VarContext* ctx){\n    cst_t high, low;\n    ucst_t mask;\n    if( ctx != nullptr && _concrete_ctx_id == ctx->id )\n        return _concrete;\n    else{\n        high = args[1]->concretize(ctx);\n        low = args[2]->concretize(ctx);\n        if( high == 63 ){\n            mask = 0xffffffffffffffff;\n        }else{\n            mask = (((cst_t)1 << (high+1))-1);\n        }\n        \n        _concrete =  ((ucst_t)args[0]->concretize(ctx) & mask) >> (ucst_t)low;\n        if( ctx != nullptr )\n            _concrete_ctx_id = ctx->id; \n        _concrete = cst_sign_extend(size, _concrete);\n    }\n    return _concrete; \n}\n\nExpr ExprExtract::copy(){\n    return extract(args[0]->copy(), args[1]->copy(), args[2]->copy());\n}\n\n// ==================================\nExprConcat::ExprConcat(Expr upper, Expr lower): ExprObject(ExprType::CONCAT, upper->size+lower->size){\n    args.push_back(upper);\n    args.push_back(lower);\n}\nhash_t ExprConcat::hash(){\n    unsigned char 
hash_in[MAXLEN_HASH_IN]; \n    if( !_hashed ){\n        _hash = exprhash(hash_in, prepare_hash_with_i32(hash_in, args[1]->hash(), \n                    prepare_hash_with_i32(hash_in, args[0]->hash())), size);\n        _hashed = true;\n    }\n    return _hash; \n}\n\nvoid ExprConcat::print(ostream& os){\n    os << \"{\" << std::hex;\n    args.at(0)->print(os); \n    os << \",\" << std::hex;\n    args.at(1)->print(os); \n    os << \"}\";\n}\n\ncst_t ExprConcat::concretize(VarContext* ctx){\n    cst_t upper, lower; \n    if( ctx != nullptr && _concrete_ctx_id == ctx->id )\n        return _concrete;\n    else{\n        upper = args[0]->concretize(ctx);\n        lower = args[1]->concretize(ctx);\n        _concrete = cst_sign_extend(size, (((ucst_t)upper)<<(ucst_t)args[1]->size)\n                                             | (ucst_t)cst_sign_trunc(args[1]->size, lower));\n        if( ctx != nullptr )\n            _concrete_ctx_id = ctx->id; \n         _concrete =cst_sign_extend(size, _concrete);\n    }\n   \n    return _concrete; \n}\n\nExpr ExprConcat::copy(){\n    return concat(args[0]->copy(), args[1]->copy());\n}\n\n/* ===================================== */\nExprBisz::ExprBisz(exprsize_t _size, Expr cond, cst_t mode): ExprObject(ExprType::BISZ, _size){\n    if( mode != 0 && mode != 1){\n        throw expression_exception(QuickFmt() << \"Can only use Bisz() with mode 0 or 1 (got \" << mode << \")\" >> QuickFmt::to_str );\n    }\n    args.push_back(cond);\n    _mode = mode;\n}\nhash_t ExprBisz::hash(){\n    unsigned char hash_in[MAXLEN_HASH_IN]; \n    if( !_hashed ){\n        _hash = exprhash(hash_in, prepare_hash_with_i32(hash_in, args[0]->hash(), \n            prepare_hash_with_str(hash_in, (_mode)?\"BISZ1\":\"BISZ0\")), size);\n        _hashed = true;\n    }\n    return _hash;\n}\n\ncst_t ExprBisz::mode(){return _mode;}\n\nvoid ExprBisz::print(ostream& out){\n    if( _mode ){\n        out << \"bisz<1>(\" << std::hex;\n        args[0]->print(out);\n        out << 
\")\";\n    }else{\n        out << \"bisz<0>(\" << std::hex;\n        args[0]->print(out);\n        out << \")\";\n    }\n}\n\ncst_t ExprBisz::concretize(VarContext* ctx){\n    if( ctx != nullptr && _concrete_ctx_id == ctx->id )\n        return _concrete;\n    else{\n        _concrete = (args[0]->concretize(ctx) == 0)? _mode : _mode^1;\n        if( ctx != nullptr )\n            _concrete_ctx_id = ctx->id;\n        _concrete =cst_sign_extend(size, _concrete);\n    }\n    \n    return _concrete;\n}\n\nExpr ExprBisz::copy(){\n    return bisz(size, args[0]->copy(), _mode);\n}\n\n// ==================================\nExprUnknown::ExprUnknown(exprsize_t s): ExprObject(ExprType::UNKNOWN, s){}\nhash_t ExprUnknown::hash(){\n    unsigned char hash_in[MAXLEN_HASH_IN]; \n    if( !_hashed ){\n        _hash = exprhash(hash_in, prepare_hash_with_i32(hash_in, 0x77777777), size);\n        _hashed = true;\n    }\n    return _hash; \n}\nvoid ExprUnknown::print(ostream& os){\n    os << \"???\";\n}\n\ncst_t ExprUnknown::concretize(VarContext* ctx){\n    throw runtime_exception(\"Can not concretize ExprUnknown instance\");\n}\n\n// ==================================\n\n/* Helper functions to create new expressions */\n// Create from scratch  \nExpr exprcst(exprsize_t size, cst_t cst){\n    return make_shared<ExprCst>(size, cst);\n}\nExpr exprvar(exprsize_t size, string name, int num){\n    return make_shared<ExprVar>(size, name, num);\n}\nExpr exprmem(exprsize_t size, Expr addr){\n    return make_shared<ExprMem>(size, addr);\n}\nExpr exprbinop(Op op, Expr left, Expr right){\n    return expr_canonize(make_shared<ExprBinop>(op, left, right));\n} \nExpr exprunop(Op op, Expr arg){\n    return expr_canonize(make_shared<ExprUnop>(op, arg));\n} \nExpr extract(Expr arg, unsigned long higher, unsigned long lower){\n    return make_shared<ExprExtract>(arg, exprcst(sizeof(cst_t)*8, higher), exprcst(sizeof(cst_t)*8, lower));\n}\nExpr extract(Expr arg, Expr higher, Expr lower){\n    return 
make_shared<ExprExtract>(arg, higher, lower);\n}\nExpr concat(Expr upper, Expr lower){\n    return expr_canonize(make_shared<ExprConcat>(upper, lower));\n}\nExpr exprunknown(exprsize_t size){\n    return make_shared<ExprUnknown>(size);\n}\n// Binary operations \nExpr operator+(Expr left, Expr right){\n    return exprbinop(Op::ADD, left, right);\n}\nExpr operator+(Expr left, cst_t right ){\n    return exprbinop(Op::ADD, left, exprcst(left->size, right));\n}\nExpr operator+(cst_t left, Expr right){\n    return exprbinop(Op::ADD, exprcst(right->size, left), right);\n}\n\nExpr operator-(Expr left, Expr right){\n    return exprbinop(Op::ADD, left, \n            make_shared<ExprUnop>(Op::NEG,right));\n}\nExpr operator-(Expr left, cst_t right ){\n    return left - exprcst(left->size, right);\n}\nExpr operator-(cst_t left, Expr right){\n    return exprcst(right->size, left) - right;\n}\n\nExpr operator*(Expr left, Expr right){\n    return exprbinop(Op::MUL, left, right);\n}\nExpr operator*(Expr left, cst_t right ){\n    return exprbinop(Op::MUL, left, exprcst(left->size, right));\n}\nExpr operator*(cst_t left, Expr right){\n    return exprbinop(Op::MUL, exprcst(right->size, left), right);\n}\n\nExpr operator/(Expr left, Expr right){\n    return exprbinop(Op::DIV, left, right);\n}\nExpr operator/(Expr left, cst_t right ){\n    return exprbinop(Op::DIV, left, exprcst(left->size, right));\n}\nExpr operator/(cst_t left, Expr right){\n    return exprbinop(Op::DIV, exprcst(right->size, left), right);\n}\n\nExpr operator&(Expr left, Expr right){\n    return exprbinop(Op::AND, left, right);\n}\nExpr operator&(Expr left, cst_t right ){\n    return exprbinop(Op::AND, left, exprcst(left->size, right));\n}\nExpr operator&(cst_t left, Expr right){\n    return exprbinop(Op::AND, exprcst(right->size, left), right);\n}\n\nExpr operator|(Expr left, Expr right){\n    return exprbinop(Op::OR, left, right);\n}\nExpr operator|(Expr left, cst_t right ){\n    return exprbinop(Op::OR, left, 
exprcst(left->size, right));\n}\nExpr operator|(cst_t left, Expr right){\n    return exprbinop(Op::OR, exprcst(right->size, left), right);\n}\n\nExpr operator^(Expr left, Expr right){\n    return exprbinop(Op::XOR, left, right);\n}\nExpr operator^(Expr left, cst_t right ){\n    return exprbinop(Op::XOR, left, exprcst(left->size, right));\n}\nExpr operator^(cst_t left, Expr right){\n    return exprbinop(Op::XOR, exprcst(right->size, left), right);\n}\n\nExpr operator%(Expr left, Expr right){\n    return exprbinop(Op::MOD, left, right);\n}\nExpr operator%(Expr left, cst_t right ){\n    return exprbinop(Op::MOD, left, exprcst(left->size, right));\n}\nExpr operator%(cst_t left, Expr right){\n    return exprbinop(Op::MOD, exprcst(right->size, left), right);\n}\n\nExpr operator<<(Expr left, Expr right){\n    return exprbinop(Op::SHL, left, right);\n}\nExpr operator<<(Expr left, cst_t right ){\n    return exprbinop(Op::SHL, left, exprcst(left->size, right));\n}\nExpr operator<<(cst_t left, Expr right){\n    return exprbinop(Op::SHL, exprcst(right->size, left), right);\n}\n\nExpr operator>>(Expr left, Expr right){\n    return exprbinop(Op::SHR, left, right);\n}\nExpr operator>>(Expr left, cst_t right ){\n    return exprbinop(Op::SHR, left, exprcst(left->size, right));\n}\nExpr operator>>(cst_t left, Expr right){\n    return exprbinop(Op::SHR, exprcst(right->size, left), right);\n}\n\nExpr shl(Expr arg, Expr shift){\n    return exprbinop(Op::SHL, arg, shift);\n}\nExpr shl(Expr arg, cst_t shift){\n    return exprbinop(Op::SHL, arg, exprcst(arg->size,shift));\n}\nExpr shl(cst_t arg, Expr shift){\n    return exprbinop(Op::SHL, exprcst(shift->size,arg), shift);\n}\n\nExpr shr(Expr arg, Expr shift){\n    return exprbinop(Op::SHR, arg, shift);\n}\nExpr shr(Expr arg, cst_t shift){\n    return exprbinop(Op::SHR, arg, exprcst(arg->size,shift));\n}\nExpr shr(cst_t arg, Expr shift){\n    return exprbinop(Op::SHR, exprcst(shift->size,arg), shift);\n}\n\nExpr sdiv(Expr left, Expr 
right){\n    return exprbinop(Op::SDIV, left, right);\n}\nExpr sdiv(Expr left, cst_t right){\n    return exprbinop(Op::SDIV, left, exprcst(left->size, right));\n}\nExpr sdiv(cst_t left, Expr right){\n    return exprbinop(Op::SDIV, exprcst(right->size, left), right);\n}\n\nExpr smod(Expr left, Expr right){\n    return exprbinop(Op::SMOD, left, right);\n}\nExpr smod(Expr left, cst_t right){\n    return exprbinop(Op::SMOD, left, exprcst(left->size, right));\n}\nExpr smod(cst_t left, Expr right){\n    return exprbinop(Op::SMOD, exprcst(right->size, left), right);\n}\n\nExpr mulh(Expr left, Expr right){\n    return exprbinop(Op::MULH, left, right);\n}\nExpr mulh(Expr left, cst_t right){\n    return exprbinop(Op::MULH, left, exprcst(left->size, right));\n}\nExpr mulh(cst_t left, Expr right){\n    return exprbinop(Op::MULH, exprcst(right->size, left), right);\n}\n\nExpr smull(Expr left, Expr right){\n    return exprbinop(Op::SMULL, left, right);\n}\nExpr smull(Expr left, cst_t right){\n    return exprbinop(Op::SMULL, left, exprcst(left->size, right));\n}\nExpr smull(cst_t left, Expr right){\n    return exprbinop(Op::SMULL, exprcst(right->size, left), right);\n}\n\nExpr smulh(Expr left, Expr right){\n    return exprbinop(Op::SMULH, left, right);\n}\nExpr smulh(Expr left, cst_t right){\n    return exprbinop(Op::SMULH, left, exprcst(left->size, right));\n}\nExpr smulh(cst_t left, Expr right){\n    return exprbinop(Op::SMULH, exprcst(right->size, left), right);\n}\n\n// Unary operations\nExpr operator~(Expr arg){\n    return make_shared<ExprUnop>(Op::NOT, arg);\n}\nExpr operator-(Expr arg){\n    return make_shared<ExprUnop>(Op::NEG, arg);\n}\nExpr bisz(exprsize_t size, Expr cond, cst_t mode){\n    return make_shared<ExprBisz>(size, cond, mode);\n}\n\n/* Printing operators */ \nostream& operator<<(ostream& os, Expr e){\n    os << std::hex; // Default, print constants in hex\n    e->print(os);\n    return os;\n}\nstring op_to_str(Op op){\n    switch(op){\n        case Op::ADD: 
return \"+\";\n        case Op::MUL: return \"*\";\n        case Op::MULH: return \"*h \";\n        case Op::SMULL: return \"*lS \";\n        case Op::SMULH: return \"*hS \";\n        case Op::DIV: return \"/\";\n        case Op::SDIV: return \"/S \";\n        case Op::NEG: return \"-\";\n        case Op::AND: return \"&\"; \n        case Op::OR: return \"|\";\n        case Op::XOR: return \"^\";  \n        case Op::SHL: return \"<<\";\n        case Op::SHR: return \">>\";\n        case Op::NOT: return \"~\";\n        case Op::MOD: return \"%\";\n        case Op::SMOD: return \"%S \";\n        default: throw expression_exception(\"op_to_str(): got unknown operation!\");\n    }\n}\n\n/* ======= Canonize an expression ========== */\n\n/* This function can be used to build an associative binary operation from \n * an expression and a list of arguments.\n *  \n * This function is used when canonizing associative binary expressions where\n * arguments should be reordered and grouped by higher priority first. \n * \n * The function takes several arguments:\n *  - e : an expression that must be combined with the expressions in 'new_args'\n *        to build the new associative expression. It will be handled differently\n *        if it is a binop corresponding to 'op' or if it's a normal expression\n *  - op : the associative operation we build\n *  - new_args : a list of args that must be combined to 'e' with operation 'op'.\n *               the arguments are expected to be sorted from higher priority to\n *               lower priority\n * \n * The function combines the arguments in the canonic way ! 
\n * */\nExpr build_associative_from_args(Expr e, Op op, vector<Expr>& new_args){\n    Expr new_arg = nullptr, next_arg = nullptr;\n    Expr res = nullptr;\n    if( new_args.empty() ){\n        return e;\n    }\n    if( !e->is_binop(op)){\n        // e is not a binop of type 'op', we stop here and combine all args by priority\n        bool added_leaf = false;\n        for( vector<Expr>::iterator it = new_args.begin(); it != new_args.end(); it++ ){\n            if( !added_leaf && (*it)->inf(e)){\n                // Time to add args[0]\n                next_arg = e;\n                added_leaf = true;\n                it = it-1; // Dont forget to stay on the same new_arg then\n            }else{\n                // Get next arg\n                next_arg = *it;\n            }\n            if( res == nullptr){\n                res = next_arg;\n            }else{\n                res = make_shared<ExprBinop>(op, res, next_arg);\n            }\n        }\n        if( !added_leaf){\n            res = make_shared<ExprBinop>(op, res, e);\n        }\n        return res;\n    }else if( new_args.back()->inf(e->args[1]) ){\n        // e is a binop of type 'op' and the smaller new argument is smaller than\n        // the right side of 'e'.  So we insert the rest of the new arguments and\n        // add the smaller one in the end\n        new_arg = new_args.back();\n        new_args.pop_back();\n        res = build_associative_from_args(e, op, new_args);\n        return make_shared<ExprBinop>(op, res, new_arg);\n    }else{\n        // e is a binop of type 'op' and the smaller new argument is bigger than\n        // the right side of 'e'. 
So we need to insert all new args to the left side\n        // and finally add the right one in the end (because smallest priority)\n        res = build_associative_from_args(e->args[0], op, new_args);\n        return make_shared<ExprBinop>(op, res, e->args[1]);\n    }\n}\n\nExpr build_left_associative_from_args(Expr e, Op op, vector<Expr>& new_args){\n    Expr new_arg = nullptr, next_arg = nullptr;\n    Expr res = nullptr;\n    if( new_args.empty() ){\n        return e;\n    }\n    if( !e->is_binop(op)){\n        // e is not a binop of type 'op', we stop here and combine all args by priority\n        res = e;\n        for( vector<Expr>::iterator it = new_args.begin(); it != new_args.end(); it++ ){\n            res = make_shared<ExprBinop>(op, res, *it);\n        }\n        return res;\n    }else if( new_args.back()->inf(e->args[1]) ){\n        // e is a binop of type 'op' and the smaller new argument is smaller than\n        // the right side of 'e'.  So we insert the rest of the new arguments and\n        // add the smaller one in the end\n        new_arg = new_args.back();\n        new_args.pop_back();\n        res = build_left_associative_from_args(e, op, new_args);\n        return make_shared<ExprBinop>(op, res, new_arg);\n    }else{\n        // e is a binop of type 'op' and the smaller new argument is bigger than\n        // the right side of 'e'. 
So we need to insert all new args to the left side\n        // and finally add the right one in the end (because smallest priority)\n        res = build_left_associative_from_args(e->args[0], op, new_args);\n        return make_shared<ExprBinop>(op, res, e->args[1]);\n    }\n}\n\n\nExpr expr_canonize(Expr e){\n    vector<Expr> new_args;\n    Expr e1, e2, leftmost; \n    Expr res;\n    /* Binop */\n    if( e->is_binop() ){\n        if( op_is_associative(e->op()) && op_is_symetric(e->op())){\n            // Associative and symetric -> re-order arguments\n            // First get arguments list as long as the operator is used for\n            // right side argument \n            if( e->args[1]->is_binop(e->op()))\n                e->args[1]->get_associative_args(e->op(), new_args);\n            else\n                new_args.push_back(e->args[1]);\n            // Sort the arguments to call build_associative_from_args\n            std::reverse(new_args.begin(), new_args.end()); // Invert vector to have the bigger ones first\n            res = build_associative_from_args(e->args[0], e->op(), new_args);\n            return res;\n        }else if( op_is_left_associative(e->op()) && e->args[0]->is_binop(e->op())){\n            // Left associative -> (a/b)/c -> (a/c)/b\n            new_args.push_back(e->args[1]);\n            res = build_left_associative_from_args(e->args[0], e->op(), new_args);\n            return res;\n        }\n        // Canonize and return\n        if( new_args.size() > 0 ){\n            // Group higher args together first\n            while( new_args.size() > 1 ){\n                e1 = new_args.back();\n                new_args.pop_back();\n                e2 = new_args.back();\n                new_args.pop_back();\n                new_args.push_back(make_shared<ExprBinop>(e->op(), e1, e2));\n            }\n            return new_args.back();\n        }else{\n            // Nothing to do, return the same expression\n            return e;\n        }\n 
   /* Concat */\n    }else if( e->is_concat() ){\n        if( e->args[0]->is_concat() )\n            return concat(e->args[0]->args[0], concat(e->args[0]->args[1], e->args[1]));\n        else\n            return e;\n    }else\n        return e; \n}\n\n/* ====================================== */\n/* Misc operations and functions on enums */ \nbool operator<(Op op1, Op op2){\n    return static_cast<int>(op1) < static_cast<int>(op2);\n}\nbool op_is_symetric(Op op){\n    return (op == Op::ADD || op == Op::AND || op == Op::MUL || op == Op::MULH ||\n            op == Op::OR || op == Op::XOR || op == Op::SMULL ||\n            op == Op::SMULH );\n}\nbool op_is_associative(Op op){\n    return (op == Op::ADD || op == Op::AND || op == Op::MUL || op == Op::MULH ||\n            op == Op::OR || op == Op::XOR || op == Op::SMULL ||\n            op == Op::SMULH );\n}\nbool op_is_left_associative(Op op){\n    return (op == Op::DIV);\n}\n\nbool op_is_multiplication(Op op){\n    return (op == Op::MUL || op == Op::SMULL || op == Op::SMULH || op == Op::MULH);\n}\n\nbool op_is_distributive_over(Op op1, Op op2){\n    switch(op1){\n        case Op::MUL:\n        case Op::MULH: \n        case Op::SMULL:\n        case Op::SMULH: \n            return (op2 == Op::ADD);\n        case Op::AND: return (  op2 == Op::AND ||\n                                op2 == Op::OR );\n        case Op::OR: return (   op2 == Op::OR ||\n                                op2 == Op::AND );\n        default: return false;\n    }\n}\n\nbool operator<(ExprType t1, ExprType t2){\n    return static_cast<int>(t1) < static_cast<int>(t2);\n}\n\n/* Constant manipulation */\ncst_t cst_sign_trunc(exprsize_t size, cst_t val){\n    if( size == sizeof(cst_t)*8 )\n        return val;\n    else\n        return val & (((ucst_t)1<<(ucst_t)size)-1);\n}\ncst_t cst_mask(exprsize_t size){\n    if( size == sizeof(cst_t)*8 )\n        return -1;\n    else\n        return ((ucst_t)1<<size)-1; \n}\ncst_t cst_sign_extend(exprsize_t size, 
cst_t c){\n    if( size == sizeof(cst_t)*8 ){\n        return c;\n    }else{\n        /* Adjust the sign to whole variable  */\n        if( ((ucst_t)1<<((ucst_t)size-1)) & (ucst_t)c ){\n            // Negative, set higher bits to 1\n            return ((ucst_t)0xffffffffffffffff<< size) | c; \n        }else{\n            // Positive, set higher bits to 0\n            return ((((ucst_t)1<<size)-1) & c);\n        }\n    }\n}\n\n/* ====================================== */\nVarContext::VarContext(int i): id(i){}\n\nvoid VarContext::set(const string& name, cst_t value){\n    varmap[name] = value;\n    id++;\n}\n\ncst_t VarContext::get(const string& name){\n    map<string, cst_t>::iterator it; \n    if( ( it = varmap.find(name)) == varmap.end())\n        throw expression_exception(QuickFmt() << \"Trying to access variable '\" \n            << name << \"' which is unknown in context\" >> QuickFmt::to_str);\n    return it->second;\n}\n\nvoid VarContext::remove(const string& name){\n    varmap.erase(name);\n    id++;\n}\n\nvoid VarContext::print(ostream& os ){\n    os << std::endl;\n    for( auto var : varmap ){\n        os << var.first << \" : \" << std::hex << \"0x\" << var.second << std::dec << std::endl;\n    }\n}\n\nostream& operator<<(ostream& os, VarContext& c){\n    c.print(os);\n    return os;\n}\n"
  },
  {
    "path": "libropium/symbolic/simplification.cpp",
    "content": "#include \"simplification.hpp\"\n#include \"expression.hpp\"\n#include \"exception.hpp\"\n#include <iostream>\n#include <algorithm>\n#include <iterator>\n\nusing std::make_shared;\n\n/* ExprSimplifier implementation */ \nExprSimplifier::ExprSimplifier(){}\n\nvoid ExprSimplifier::add(ExprSimplifierFunc func){\n    simplifiers.push_back(func);\n}\nvoid ExprSimplifier::add(RecExprSimplifierFunc func){\n    rec_simplifiers.push_back(func);\n}\nvoid ExprSimplifier::add_restruct(RecExprSimplifierFunc func){\n    restruct_simplifiers.push_back(func);\n}\n\nExpr ExprSimplifier::run_simplifiers(Expr e){\n    Expr tmp_expr = e; \n    vector<ExprSimplifierFunc>::iterator func;\n    vector<RecExprSimplifierFunc>::iterator rec_func;\n    /* Normal functions */ \n    for (func = simplifiers.begin(); func != simplifiers.end(); func++){\n        tmp_expr = (**func)(tmp_expr);\n    }\n    /* Recursive functions */ \n    for (rec_func = rec_simplifiers.begin(); rec_func != rec_simplifiers.end(); rec_func++)\n        tmp_expr = (**rec_func)(tmp_expr, *this);\n    return tmp_expr; \n}\n\nExpr ExprSimplifier::simplify(Expr e){\n    Expr tmp_expr = e;\n    Expr prev_expr;\n    Expr prev_arg;\n    // Check if already simplified or if simple constant\n    if( e->_is_simplified || e->is_cst()){\n        return e;\n    }else if( e->_simplified_expr != nullptr ){\n        return e->_simplified_expr;\n    }\n    // Simplify util fix point is found\n    do{\n        prev_expr = tmp_expr;\n        tmp_expr = run_simplifiers(tmp_expr);\n        /* If no high level change, simplify arguments and try again */\n        /* !!! Don't enter the block if args.size() == 0 because it would\n         * cause basic expressions (cst, var) to loose their taint ! 
*/\n        if( prev_expr->eq(tmp_expr) && tmp_expr->args.size() > 0){\n            // Simplify args in place :)\n            for( int i = 0; i < tmp_expr->args.size(); i++ ){\n                tmp_expr->args[i] = simplify(tmp_expr->args[i]);\n            }\n            // ! If binop we recanonize it because arguments changed !\n            tmp_expr = expr_canonize(tmp_expr);\n            // ! We remove the hash and taint of tmp_expr because we modify its \n            // arguments directly in the AST  \n            tmp_expr->_hashed = false;\n            // Re-apply simplifications\n            prev_expr = tmp_expr; \n            tmp_expr = run_simplifiers(tmp_expr);\n        }\n    }while( prev_expr->neq(tmp_expr) );\n    if( tmp_expr->neq(e) ){\n        // If the expression was simplified then save the pointer to the \n        // simplified expression. If not the don't save anything because\n        // the object shouldn't hold a shared pointer to itself :/\n        e->_simplified_expr = tmp_expr;\n    }\n    tmp_expr->_is_simplified = true;\n    return tmp_expr;\n}\n\nExprSimplifier* NewDefaultExprSimplifier(){\n    ExprSimplifier* simp = new ExprSimplifier();\n    simp->add(es_constant_folding);\n    simp->add(es_neutral_elements);\n    simp->add(es_absorbing_elements);\n    simp->add(es_arithmetic_properties);\n    simp->add(es_involution);\n    simp->add(es_extract_patterns);\n    simp->add(es_basic_transform);\n    simp->add(es_logical_properties);\n    simp->add(es_concat_patterns);\n    simp->add(es_arithmetic_factorize);\n    //simp->add(es_generic_distribute);\n    simp->add(es_generic_factorize);\n    //simp->add(es_deep_associative);\n    return simp;\n}\n\n/* ==================================================\n                 Light simplifications\n   ================================================= */\n/* Constant folding */\nExpr es_constant_folding(Expr e){\n    Expr res = nullptr;\n    cst_t _concrete, high, low;\n    ucst_t mask;\n\n    if( 
e->is_binop() && e->args[0]->is_cst() &&\n        e->args[1]->is_cst()){\n        /* Binary operators */\n        switch(e->op()){\n            case Op::ADD: _concrete = (e->args[0]->cst() + e->args[1]->cst()); break;\n            case Op::MUL: _concrete = ((ucst_t)e->args[0]->cst() * (ucst_t)e->args[1]->cst()); break;\n            case Op::MULH: _concrete = (cst_t)(((__uint128_t)cst_sign_trunc(e->args[0]->size, e->args[0]->cst()) * \n                                                            cst_sign_trunc(e->args[1]->size, (__uint128_t)e->args[1]->cst())) >> e->size ); break;\n            case Op::DIV: _concrete = ((ucst_t)cst_sign_trunc(e->args[0]->size, e->args[0]->cst()) / (ucst_t)cst_sign_trunc(e->args[1]->size, e->args[1]->cst())); break;\n            case Op::SDIV: _concrete = (e->args[0]->cst() / e->args[1]->cst()); break;\n            case Op::AND: _concrete = (e->args[0]->cst() & e->args[1]->cst()); break;\n            case Op::OR: _concrete = (e->args[0]->cst() | e->args[1]->cst()); break;\n            case Op::XOR: _concrete = (e->args[0]->cst() ^ e->args[1]->cst()); break;\n            case Op::MOD: _concrete = ((ucst_t)e->args[0]->cst() % (ucst_t)e->args[1]->cst()); break;\n            case Op::SMOD: _concrete = (e->args[0]->cst() % e->args[1]->cst()); break;\n            case Op::SMULL: _concrete = (cst_t)((__int128_t)e->args[0]->cst() * e->args[1]->cst()); break;\n            case Op::SMULH: _concrete = (cst_t)(((__int128_t)e->args[0]->cst() * e->args[1]->cst()) >> e->size); break;\n            case Op::SHL: \n                if( e->args[1]->cst() >= e->args[0]->size ){\n                    _concrete = 0;\n                }else{ \n                    _concrete = ((ucst_t)cst_sign_trunc(e->args[0]->size, e->args[0]->cst())) << ((ucst_t)e->args[1]->cst());\n                }\n                break;\n            case Op::SHR: \n                if( cst_sign_trunc(e->args[1]->size, e->args[1]->cst()) >= e->args[0]->size ){\n                    
_concrete = 0;\n                }else{\n                    _concrete = ((ucst_t)cst_sign_trunc(e->args[0]->size, e->args[0]->cst())) >> ((ucst_t)e->args[1]->cst());\n                }\n                break;\n            default: throw runtime_exception(\"Missing case in constant folding simplification\");\n        }\n        res = exprcst(e->size, cst_sign_extend(e->size, _concrete));\n    }else if( e->is_unop() && e->args[0]->is_cst()){\n        /* Unary operators */\n        switch(e->op()){\n            case Op::NEG: _concrete =cst_sign_extend(e->size, -(e->args[0]->cst())); break;\n            case Op::NOT: _concrete =cst_sign_extend(e->size, ~(e->args[0]->cst())); break;\n            default: throw runtime_exception(\"Missing case in constant folding simplification\");\n        }\n        res = exprcst(e->size, cst_sign_extend(e->size, _concrete));\n    }else if( e->is_bisz() && e->args[0]->is_cst()){\n        /* BISZ */\n        _concrete = (e->args[0]->cst() == 0)? e->mode() : e->mode()^1;\n        res = exprcst(e->size, cst_sign_extend(e->size, _concrete));\n    }else if(   e->is_extract() && e->args[0]->is_cst() &&\n                e->args[1]->is_cst() && e->args[2]->is_cst()){\n        /* Extract */ \n        high = e->args[1]->cst();\n        low = e->args[2]->cst();\n        if( high == 63 ){\n            mask = 0xffffffffffffffff;\n        }else{\n            mask = (((cst_t)1 << (high+1))-1);\n        }\n        _concrete =  ((ucst_t)e->args[0]->cst() & mask) >> (ucst_t)low;\n        res = exprcst(e->size, cst_sign_extend(e->size, _concrete));\n    }else if( e->is_concat() && e->args[0]->is_cst() &&\n                e->args[1]->is_cst() ){\n        /* Concat */ \n        high = e->args[0]->cst();\n        low = e->args[1]->cst();\n        _concrete = cst_sign_extend(e->size, (((ucst_t)high)<<(ucst_t)e->args[1]->size)\n                                             | (ucst_t)cst_sign_trunc(e->args[1]->size, low));\n        res = exprcst(e->size, 
cst_sign_extend(e->size, _concrete));\n    }\n    /* Return result */\n    if( res != nullptr ){\n        return res;\n    }else\n        return e;\n}\n\n/* Neutral elements */\nExpr es_neutral_elements(Expr e){\n    if( e->is_binop() && e->args[0]->is_cst()){\n        // 0 + X \n        if( e->op() == Op::ADD && e->args[0]->cst() == 0)\n            return e->args[1];\n        // 1 * X\n        else if( (op_is_multiplication(e->op())) && e->args[0]->cst() == 1)\n            return e->args[1];\n        // 0xfffff.... & X\n        else if( e->op() == Op::AND && cst_sign_trunc(e->size, e->args[0]->cst()) == cst_mask(e->size))\n            return e->args[1];\n        // 0 |^ X \n        else if( (e->op() == Op::OR || e->op() == Op::XOR)\n                 && e->args[0]->cst() == 0)\n            return e->args[1];\n    }else if( e->is_binop() && e->args[1]->is_cst()){\n        // X / 1\n        if( (e->op()==Op::DIV || e->op() == Op::SDIV) && e->args[1]->cst() == 1 )\n            return e->args[0];\n        // X << 0 or X >> 0\n        else if( (e->op() == Op::SHL || e->op() == Op::SHR) && e->args[1]->cst() == 0 )\n            return e->args[0];\n    }else if(e->is_extract() && e->args[1]->is_cst() && e->args[2]->is_cst()){\n        // Extract(X, sizeof(X)-1, 0)\n        if( e->args[1]->cst() == e->args[0]->size-1 && e->args[2]->cst() == 0 )\n            return e->args[0];\n    }\n    return e;\n}\n\n/* Absorbing elements */\nExpr es_absorbing_elements(Expr e){\n    if( !e->is_binop() )\n        return e; \n    \n    if( e->args[0]->is_cst()){\n        // 0 &*//S X\n        if( (e->op() == Op::AND || op_is_multiplication(e->op()) || e->op() == Op::DIV || e->op() == Op::SDIV) \n             && e->args[0]->cst() == 0)\n            return e->args[0];\n        // 0xffff..... 
| X \n        else if( (e->op() == Op::OR)\n                 && cst_sign_trunc(e->size, e->args[0]->cst()) == cst_mask(e->size))\n            return e->args[0];\n        // X << sizeof(X) or X >> sizeof(X)\n    }else if( (e->op() == Op::SHL || e->op() == Op::SHR) && e->args[1]->is_cst() &&\n            (e->args[1]->cst() >= (cst_t)e->size)){\n            return exprcst(e->size, 0);\n    }\n    return e;\n}\n\n/* ADD specific simplifications */\nExpr es_arithmetic_properties(Expr e){\n    if( !e->is_binop(Op::ADD)){\n        return e;\n    }\n    // X-X --> 0 \n    if( e->args[1]->is_unop(Op::NEG) && e->args[0]->eq(e->args[1]->args[0])){\n        return exprcst(e->size, 0);\n    // X+(-1*X) --> 0 \n    if( e->args[1]->is_binop() && op_is_multiplication(e->args[1]->op()) \n         && e->args[1]->args[0]->is_cst()\n         && e->args[1]->args[0]->cst() == -1 && e->args[1]->args[1]->eq(e->args[0])){\n        return exprcst(e->size, 0);\n    // -X+X --> 0\n    }else if( e->args[0]->is_unop(Op::NEG) && e->args[1]->eq(e->args[0]->args[0])){\n        return exprcst(e->size, 0);\n    // (-1*X)+X --> 0\n    }else if(e->args[1]->is_binop() && op_is_multiplication(e->args[1]->op()) \n            && e->args[0]->args[0]->is_cst()\n            && e->args[0]->args[0]->cst() == -1 && e->args[0]->args[1]->eq(e->args[1]))\n        return exprcst(e->size, 0);\n    }\n    return e;\n}\n\n/* NEG and NOT involution simplifications */\nExpr es_involution(Expr e){\n    if( e->is_unop(Op::NEG) || e->is_unop(Op::NOT)){\n        if( e->args[0]->is_unop(e->op()))\n            return e->args[0]->args[0];\n    }\n    return e;\n}\n\n/* Extract specific simplifications */ \nExpr es_extract_patterns(Expr e){\n    if( e->is_extract() ){\n        if( e->args[0]->is_concat() && e->args[1]->is_cst() && e->args[2]->is_cst()){\n            // extract(concat(X,Y), a, b) --> extract(X, a', b')\n            if( e->args[2]->cst() >= e->args[0]->args[1]->size ){\n                return extract( 
e->args[0]->args[0], \n                                e->args[1]->cst()-e->args[0]->args[1]->size,\n                                e->args[2]->cst()-e->args[0]->args[1]->size);\n            }\n            // extract(concat(X,Y), a, b) --> extract(Y, a', b')\n            else if( e->args[1]->cst() < e->args[0]->args[1]->size ){\n                return extract( e->args[0]->args[1], \n                                e->args[1]->cst(),\n                                e->args[2]->cst());\n            }\n        // extract(extract(X,a,b),c,d) --> extract(X, a',b')\n        }else if( e->args[0]->is_extract() && \n                (e->args[0]->args[2]->size == e->args[1]->size) &&\n                (e->args[0]->args[2]->size == e->args[2]->size)){\n            return extract(e->args[0]->args[0],\n                e->args[0]->args[2]->cst()+e->args[1]->cst(),\n                e->args[0]->args[2]->cst()+e->args[2]->cst());\n        }\n    }\n    return e;\n}\n\n/* Basic transformations to canonize expressions a bit more */\nExpr es_basic_transform(Expr e){\n    if( e->is_binop(Op::SHL) && e->args[1]->is_cst()){\n        // X << Y --> X * (2**Y)\n        return e->args[0]*exprcst(e->size, ((ucst_t)1<<(ucst_t)e->args[1]->cst()));\n    }else if( e->is_binop(Op::SHR) && e->args[1]->is_cst() ){\n        // X >> Y --> X / (2**Y)\n        return e->args[0]/exprcst(e->size, ((ucst_t)1<<(ucst_t)(e->args[1]->cst())));\n    // -X --> -1*X\n    }else if(e->is_unop(Op::NEG)){\n        return exprcst(e->size, -1)*e->args[0]; \n    // -Y*X --> -(Y*X)\n    /*\n    }else if( e->is_binop(Op::MUL) && e->args[0]->is_unop(Op::NEG)){\n        return -(e->args[0]->args[0]*e->args[1]); */\n    // X*-Y --> -(X*Y)\n    /*\n    }else if( e->is_binop(Op::MUL) && e->args[1]->is_unop(Op::NEG)){\n        return -(e->args[1]->args[0]*e->args[0]); */\n    // 1+~X --> -X\n    }else if( e->is_binop(Op::ADD) && e->args[0]->is_cst() && e->args[0]->cst()==1\n                && e->args[1]->is_unop(Op::NOT)){\n    
    return -e->args[1]->args[0];\n    // -1^X --> ~X\n    }else if( e->is_binop(Op::XOR) && e->args[0]->is_cst() && e->args[0]->cst() == -1){\n        return ~e->args[1];\n    // CST*-Y --> -CST*Y\n    }else if(  ((e->is_binop() && op_is_multiplication(e->op())) || e->is_binop(Op::SDIV)) &&\n               (e->args[1]->is_unop(Op::NEG)) &&\n               (e->args[0]->is_cst()) ) {\n        return exprbinop(e->op(), -e->args[0], e->args[1]->args[0]);\n    // (-Y)*CST --> Y*(-CST) \n    }else if(  ((e->is_binop() && op_is_multiplication(e->op())) || e->is_binop(Op::SDIV)) &&\n               (e->args[0]->is_unop(Op::NEG)) &&\n               (e->args[1]->is_cst()) ) {\n        return exprbinop(e->op(), e->args[0]->args[0], -e->args[1]);\n    }\n    return e;\n}\n\n/* logical properties */ \nExpr es_logical_properties(Expr e){\n    // X &| X --> X\n    if( (e->is_binop(Op::AND) || e->is_binop(Op::OR)) && ( e->args[0]->eq(e->args[1]))){\n        return e->args[0];\n    // X & ~X --> 0\n    }else if( e->is_binop(Op::AND) && e->args[1]->is_unop(Op::NOT) && \n            e->args[0]->eq(e->args[1]->args[0])){\n        return exprcst(e->size, 0);\n    // ~X & X --> 0\n    }else if( e->is_binop(Op::AND) && e->args[0]->is_unop(Op::NOT) && \n            e->args[1]->eq(e->args[0]->args[0])){\n        return exprcst(e->size, 0);\n    // ~X |^ X --> 0xfffff....\n    }else if( (e->is_binop(Op::OR) || e->is_binop(Op::XOR)) && e->args[0]->is_unop(Op::NOT) && \n            e->args[1]->eq(e->args[0]->args[0])){\n        return exprcst(e->size, (cst_t)-1);\n    // X |^ ~X --> 0xfffff....\n    }else if( (e->is_binop(Op::OR) || e->is_binop(Op::XOR)) && e->args[1]->is_unop(Op::NOT) && \n            e->args[0]->eq(e->args[1]->args[0])){\n        return exprcst(e->size, (cst_t)-1);\n    // X ^ X --> 0\n    }else if( e->is_binop(Op::XOR) && e->args[0]->eq(e->args[1])){\n        return exprcst(e->size, 0);\n    }\n    return e;\n}\n\n/* Concat simplification patterns */\nExpr 
es_concat_patterns(Expr e){\n    // concat(X[a:b], X[b-1:c]))\n    if( e->is_concat() && e->args[0]->is_extract() && e->args[1]->is_extract() &&\n            e->args[0]->args[0]->eq(e->args[1]->args[0]) && \n            e->args[0]->args[2]->cst() == e->args[1]->args[1]->cst()+1){\n        return extract(e->args[0]->args[0], e->args[0]->args[1], e->args[1]->args[2]);\n    }\n    if( e->is_binop(Op::AND) && e->args[0]->is_cst() && e->args[1]->is_concat()){\n\n        if( cst_sign_trunc(e->args[0]->size, e->args[0]->cst()) == (((ucst_t)1<<e->args[1]->args[1]->size)-1)){\n            if( e->args[1]->args[1]->is_cst() && e->args[1]->args[1]->is_cst() == 0 ){\n                // concat(X,0) & 0x000...11111 = 0\n                return exprcst(e->size, 0);\n            }else{\n                // concat(X,Y) & 0x000...11111 = concat(0, Y)\n                return concat(exprcst(e->args[1]->args[0]->size, 0), e->args[1]->args[1]);\n            }\n        }\n        \n        if( e->args[0]->cst() == (((cst_t)-1)<<e->args[1]->args[1]->size)){\n            if( e->args[1]->args[0]->is_cst() && e->args[1]->args[0]->is_cst() == 0 ){\n                // concat(0,Y) & 0x111...000 = 0\n                return exprcst(e->size, 0);\n            }else{ \n                // concat(X,Y) & 0x111...000 = concat(X, 0)\n                return concat(e->args[1]->args[0], exprcst(e->args[1]->args[1]->size, 0));\n            }\n        }\n    }\n    return e; \n}\n\n/* Basic factorization patterns */\nExpr es_arithmetic_factorize(Expr e){\n    if( !e->is_binop(Op::ADD))\n        return e;\n    if( e->args[0]->is_binop() && op_is_multiplication(e->args[0]->op())){\n        // (X*Y)+Y --> (X+1)*Y\n        if (e->args[0]->args[1]->eq(e->args[1])){\n            return (e->args[0]->args[0]+exprcst(e->size,1))*(e->args[1]);\n        // (Y*X)+Y --> (X+1)*Y\n        }else if(e->args[0]->args[0]->eq(e->args[1])){\n            return (e->args[0]->args[1]+exprcst(e->size,1))*(e->args[1]);\n        // 
(X*Y)-Y --> (X-1)*Y\n        }else if(e->args[1]->is_unop(Op::NEG) && \n                e->args[0]->args[1]->eq(e->args[1]->args[0])){\n            return (e->args[0]->args[0]-exprcst(e->size,1))*e->args[0]->args[1]; \n        \n        }else if( e->args[1]->is_binop() && op_is_multiplication(e->args[1]->op())){\n            // (Y*X)-Y --> (X-1)*Y\n            if(e->args[1]->is_unop(Op::NEG) && \n                    e->args[0]->args[0]->eq(e->args[1]->args[0])){\n                return (e->args[0]->args[1]-exprcst(e->size,1))*e->args[0]->args[0];\n            // (A*Y)+(B*Y) --> (A+B)*Y\n            }else if( e->args[0]->args[1]->eq(e->args[1]->args[1])){\n                return (e->args[0]->args[0]+e->args[1]->args[0])*e->args[0]->args[1];\n            // (A*Y)+(Y*B) --> (A+B)*Y\n            }else if( e->args[0]->args[1]->eq(e->args[1]->args[0])){\n                return (e->args[0]->args[0]+e->args[1]->args[1])*e->args[0]->args[1];\n            // (Y*A)+(B*Y) --> (A+B)*Y\n            }else if( e->args[0]->args[0]->eq(e->args[1]->args[1])){\n                return (e->args[0]->args[1]+e->args[1]->args[0])*e->args[0]->args[0];\n            // (Y*A)+(Y*B) --> (A+B)*Y\n            }else if( e->args[0]->args[0]->eq(e->args[1]->args[0])){\n                return (e->args[0]->args[1]+e->args[1]->args[1])*e->args[0]->args[0];\n            }\n        }\n    }else if(e->args[1]->is_binop() && op_is_multiplication(e->args[1]->op())) {\n        // Y+(X*Y) --> (X+1)*Y\n        if(e->args[0]->eq(e->args[1]->args[1])){\n            return (e->args[1]->args[0]+exprcst(e->size, 1))*(e->args[0]);\n        // Y+(Y*X) --> (X+1)*Y\n        }else if( e->args[0]->eq(e->args[1]->args[0]) ){\n            return (e->args[1]->args[1]+exprcst(e->size,1))*(e->args[0]);\n        // -Y+(Y*X) --> (X-1)*Y\n        }else if( e->args[0]->is_unop(Op::NEG) &&\n                e->args[0]->args[0]->eq(e->args[1]->args[0])){\n            return (e->args[1]->args[1]-exprcst(e->size, 
1))*e->args[0]->args[0];\n        // -Y+(X*Y) --> (X-1)*Y\n        }else if( e->args[0]->is_unop(Op::NEG) &&\n                e->args[0]->args[0]->eq(e->args[1]->args[1])){\n            return (e->args[1]->args[0]-exprcst(e->size, 1))*e->args[0]->args[0];\n        }\n    // X+X --> 2*X\n    }else if( e->args[0]->eq(e->args[1])){\n        return exprcst(e->size, 2)*e->args[0];\n    }\n    return e; \n}\n\n/* Generic factorization on distributive operators */\nExpr es_generic_factorize(Expr e){\n    if( e->is_binop() && e->args[0]->is_binop() && \n          e->args[1]->is_binop() && e->args[0]->op() == e->args[1]->op() &&\n          op_is_distributive_over(e->args[0]->op(), e->op()) && \n          op_is_symetric(e->args[0]->op())){\n        // (AxB)o(AxC) --> Ax(BoC)\n        if( e->args[0]->args[0]->eq(e->args[1]->args[0])){\n            return exprbinop(e->args[0]->op(), e->args[0]->args[0], \n                        exprbinop(e->op(), e->args[0]->args[1], e->args[1]->args[1]));\n        // (AxB)o(CxA)\n        }else if( e->args[0]->args[0]->eq(e->args[1]->args[1])){\n            return exprbinop(e->args[0]->op(), e->args[0]->args[0], \n                        exprbinop(e->op(), e->args[0]->args[1], e->args[1]->args[0]));\n        // (BxA)o(CxA)\n        }else if( e->args[0]->args[1]->eq(e->args[1]->args[1])){\n            return exprbinop(e->args[0]->op(), e->args[0]->args[1], \n                        exprbinop(e->op(), e->args[0]->args[0], e->args[1]->args[0]));\n        // (BxA)o(AxC)\n        }else if( e->args[0]->args[1]->eq(e->args[1]->args[0])){\n            return exprbinop(e->args[0]->op(), e->args[0]->args[1], \n                        exprbinop(e->op(), e->args[0]->args[0], e->args[1]->args[1]));\n        }\n    }\n    return e; \n}\n\n/* Propagate distributive operators */\nExpr es_generic_distribute(Expr e){\n    // (AxB)oC --> AoC x BoC\n    if( e->is_binop() && e->args[0]->is_binop() &&\n            op_is_distributive_over(e->op(), 
e->args[0]->op())){\n        return exprbinop(e->args[0]->op(), \n                exprbinop(e->op(), e->args[0]->args[0], e->args[1]),\n                exprbinop(e->op(), e->args[0]->args[1], e->args[1]));\n    // Co(AxB) --> CoA x CoB\n    }else if( e->is_binop() && e->args[1]->is_binop() &&\n            op_is_distributive_over(e->op(), e->args[1]->op())){\n        return exprbinop(e->args[1]->op(), \n                exprbinop(e->op(), e->args[0], e->args[1]->args[0]),\n                exprbinop(e->op(), e->args[0], e->args[1]->args[1]));\n    }\n    return e;\n}\n\n/* =========================================\n *           Heavy Simplifications\n * ========================================= */\n\n/* Associativity deep in the AST \n * Try to unfold nested associative operators and see if \n * pairs can be simplified for all possible associations */\nExpr es_deep_associative(Expr e, ExprSimplifier& s){\n    vector<Expr> vec;\n    Expr expr, simp, tmp1, tmp2;\n    bool restart;\n    int i=0, j=0;\n    if( e->is_binop() && op_is_associative(e->op()) &&\n            op_is_symetric(e->op())){\n        /* Get all args */\n        e->get_associative_args(e->op(), vec);\n        /* If only two args, no need to simplify in depth */ \n        if( vec.size() <= 2 ){\n            return e; \n        }\n        /* Else enter simplify loop */\n        while( i < vec.size()-1 ){\n            j = i+1;\n            tmp1 = vec[i];\n            restart = false;\n            while( j < vec.size() ){\n                tmp2 = vec[j];\n                /* Normal op */ \n                expr = exprbinop(e->op(), tmp1, tmp2);\n                /* Simplified one */ \n                simp = s.simplify(expr);\n                /* If changed, push the new one and continue,\n                 * else continue with other args */\n                if( expr->neq(simp) ){\n                    vec.erase(std::next(vec.begin(),j));\n                    vec.erase(vec.begin()+i);\n                    i = 0;\n  
                  /* Insert in sorted ! */\n                    vec.push_back(simp);\n                    restart = true;\n                    break;\n                }else{\n                    j++;\n                }\n            }\n            if( !restart){\n                i++;\n            }\n        }\n        /* Recombine all expressions */ \n        std::sort(vec.begin(), vec.end());\n        while( vec.size() > 1 ){\n            tmp1 = vec.back(); \n            vec.pop_back();\n            tmp2 = vec.back();\n            vec.pop_back();\n            vec.push_back(exprbinop(e->op(), tmp1, tmp2));\n        }\n        return vec.back();\n    }\n    return e; \n}\n\n"
  },
  {
    "path": "libropium/symbolic/symbolic.cpp",
    "content": "#include \"symbolic.hpp\"\n#include \"exception.hpp\"\n#include \"simplification.hpp\"\n#include <cassert>\n#include <vector>\n#include <iostream>\n#include <algorithm>\n#include <sstream>\n\nusing std::get;\nusing std::vector;\nusing std::stringstream;\nusing std::make_shared;\n\nSemantics::Semantics(IRContext* r, MemContext* m): regs(r), mem(m){}\nvoid Semantics::simplify(){\n    ExprSimplifier* simp = NewDefaultExprSimplifier();\n    for( int reg = 0; reg < regs->nb_vars(); reg++ ){\n        regs->set(reg, simp->simplify(regs->get(reg)));\n    }\n    for( unordered_map<Expr, Expr>::iterator write = mem->writes.begin(); write != mem->writes.end(); write++ ){\n        mem->writes[write->first] = simp->simplify(write->second);\n    }\n    delete simp;\n}\n\nSemantics::~Semantics(){\n    delete regs; regs = nullptr;\n    delete mem; mem = nullptr;\n}\n\nostream& operator<<(ostream& os, Semantics& s){\n    os << *(s.regs) << std::endl << *(s.mem);\n    return os;\n}\n\n/* ======================================= */\n\nSymbolicEngine::SymbolicEngine(ArchType a){\n    if(a == ArchType::X86){\n        arch = new ArchX86();\n    }else if( a == ArchType::X64 ){\n        arch = new ArchX64();\n    }else if (a == ArchType::NONE){\n        arch = new ArchNone();\n    }else{\n        throw symbolic_exception(\"SymbolicEngine::SymbolicEngine() unsupported ArchType\");\n    }\n}\n\nSymbolicEngine::~SymbolicEngine(){\n    delete arch; arch = nullptr;\n}\n\n/* Some util functions to manipulate values during symbolic execution */\nExpr _reduce_rvalue(Expr e, exprsize_t high, exprsize_t low ){\n    if( high-low+1 == e->size )\n        return e;\n    else\n        return extract(e, high, low);\n}\n\nExpr _expand_lvalue(Expr current, Expr e, exprsize_t high, exprsize_t low){\n    if( high-low+1 >= current->size )\n        return e;\n    else if(low == 0){\n        return concat(extract(current, current->size-1, high+1), e);\n    }else if(high == current->size-1){\n     
   return concat(e, extract(current, low-1, 0));\n    }else{\n        return concat(extract(current, current->size-1, high+1),\n                      concat(e, extract(current, low-1, 0))); \n    }\n}\n\ninline void _set_tmp_var(int num, Expr e, int high, int low, vector<Expr>& tmp_vars){\n    unsigned int tmp_vars_size = tmp_vars.size();\n    if( tmp_vars_size <= num ){\n        /* Fill missing tmp variables if needed *//*\n        for( int i = 0; i < (num - tmp_vars_size); i++){\n            tmp_vars.push_back(nullptr);\n        }*/\n        std::fill_n(std::back_inserter(tmp_vars), (num - tmp_vars_size+1), nullptr);\n    }\n    \n    if( tmp_vars[num] == nullptr ){\n        if( low == 0 ){\n            tmp_vars[num] = e;\n        }else{\n            /* If new tmp and low is != 0, then we pad the lower bits\n             * with zero. That's a ugly hack but used to avoid some bugs \n             * for some instructions when their IR gets optimized */\n            tmp_vars[num] = _expand_lvalue(exprcst(high+1, 0), e, high, low);\n        }\n    }else{\n        tmp_vars[num] = _expand_lvalue(tmp_vars[num], e, high, low);\n    }\n}\n\nExpr _get_operand(IROperand& arg, IRContext* irctx, vector<Expr>& tmp_vars){\n    if( arg.is_cst() ){\n        if( arg.high-arg.low+1 == sizeof(cst_t)*8 )\n            return exprcst(arg.high-arg.low+1, arg.cst());\n        else\n            return exprcst(arg.high-arg.low+1, \n                ((ucst_t)arg.cst() & (((ucst_t)1 << (arg.high+1))-1)) >> (ucst_t)arg.low);\n    }else if( arg.is_var() ){\n        return _reduce_rvalue(irctx->get(arg.var()), arg.high, arg.low);\n    }else if( arg.is_tmp() ){\n        return _reduce_rvalue(tmp_vars[arg.tmp()], arg.high, arg.low);\n    }else{\n        return nullptr;\n    }\n}\n\nvoid _update_dereferenced_regs(bool* deref, Expr e){\n    switch( e->type ){\n        case ExprType::CST:\n        case ExprType::MEM:\n        case ExprType::UNKNOWN:\n            return;\n        case ExprType::VAR:\n  
          deref[e->reg()] = true;\n            break;\n        case ExprType::UNOP:\n            _update_dereferenced_regs(deref, e->args[0]);\n            break;\n        case ExprType::BINOP:\n        case ExprType::CONCAT:\n        case ExprType::EXTRACT:\n            _update_dereferenced_regs(deref, e->args[0]);\n            _update_dereferenced_regs(deref, e->args[1]);\n            break;\n        default:\n            break;\n    }\n}\n\n#define DELETE_ALL_OBJECTS() delete regs; delete mem; delete simp;\nSemantics* SymbolicEngine::execute_block(IRBlock* block){\n    Expr rvalue, dst, src1, src2;\n    IRBasicBlock::iterator instr;\n    bool stop = false;\n    IRContext* regs = new IRContext(arch->nb_regs);\n    MemContext* mem = new MemContext();\n    vector<Expr> tmp_vars;\n    ExprSimplifier *simp = NewDefaultExprSimplifier(); \n    IRBasicBlockId bblkid = 0;\n\n    /* Init context */\n    for( reg_t reg = 0; reg < arch->nb_regs; reg++){\n        regs->set(reg, exprvar(arch->bits, arch->reg_name(reg), reg));\n    }\n    block->known_max_sp_inc = true;\n    block->max_sp_inc = 0;\n\n    // FOR DEBUG \n    // std::cout << \"DEBUG EXECUTING \" << block->name << std::endl;\n\n    while( !stop ){\n        /* ====================== Execute an IR basic block ======================== */ \n        /* Execute the basic block as long as there is no reason to stop */\n        for( instr = block->get_bblock(bblkid).begin(); instr != block->get_bblock(bblkid).end(); instr++){\n            // FOR DEBUG\n            // std::cout << \"DEBUG, executing \" << *instr << std::endl;\n\n            /* Get operands expressions */\n            src1 = _get_operand(instr->src1, regs, tmp_vars);\n            src2 = _get_operand(instr->src2, regs, tmp_vars);\n            \n            /* Arithmetic and logic operations */\n            if( iroperation_is_assignment(instr->op)){\n                /* Build rvalue */\n                switch( instr->op ){\n                    case 
IROperation::ADD:\n                        rvalue = src1 + src2;\n                        break;\n                    case IROperation::SUB:\n                        rvalue = src1 - src2; \n                        break;\n                    case IROperation::MUL: \n                        rvalue = src1 * src2;\n                        break;\n                    case IROperation::MULH: \n                        rvalue = mulh(src1, src2);\n                        break;\n                    case IROperation::SMULL: \n                        rvalue = smull(src1,src2);\n                        break;\n                    case IROperation::SMULH: \n                        rvalue = smulh(src1,src2);\n                        break;\n                    case IROperation::DIV:\n                        rvalue = src1 / src2;\n                        break;\n                    case IROperation::SDIV: \n                        rvalue = sdiv(src1, src2);\n                        break;\n                    case IROperation::SHL: \n                        rvalue = shl(src1, src2);\n                        break;\n                    case IROperation::SHR: \n                        rvalue = shr(src1, src2);\n                        break;\n                    case IROperation::AND:\n                        rvalue = src1 & src2;\n                        break;\n                    case IROperation::OR:\n                        rvalue = src1 | src2;\n                        break;\n                    case IROperation::XOR:\n                        rvalue = src1 ^ src2;\n                        break;\n                    case IROperation::MOD:\n                        rvalue = src1 % src2;\n                        break;\n                    case IROperation::SMOD:\n                        rvalue = smod(src1,src2);\n                        break;\n                    case IROperation::NEG:\n                        rvalue = -src1;\n                        break;\n                 
   case IROperation::NOT:\n                        rvalue = ~src1;\n                        break;\n                    case IROperation::MOV:\n                        rvalue = src1;\n                        break;\n                    case IROperation::CONCAT:\n                        rvalue = concat(src1, src2);\n                        break;\n                    default: \n                        DELETE_ALL_OBJECTS()\n                        throw runtime_exception(\"Unsupported assignment IROperation in SymbolicEngine::execute_block()\");\n                }\n\n                /* Affect lvalue */\n                if( instr->dst.is_tmp()){\n                    _set_tmp_var(instr->dst.tmp(), rvalue, instr->dst.high, instr->dst.low, tmp_vars);\n                }else if( instr->dst.is_var()){\n                    regs->set(instr->dst.var(), _expand_lvalue(regs->get(instr->dst.var()), rvalue,\n                                                                    instr->dst.high, instr->dst.low));\n                }else{\n                    DELETE_ALL_OBJECTS()\n                    throw runtime_exception(\"SymbolicEngine::execute_block() got invalid dst operand type\");\n                }\n            }else if(instr->op == IROperation::STM){\n                /* Store memory */\n                dst = _get_operand(instr->dst, regs, tmp_vars);\n                /* THEN execute the store */\n                mem->write(dst, src1);\n                /* Record regs that have been dereferenced (in the address)*/\n                _update_dereferenced_regs(block->dereferenced_regs, dst);\n            }else if( instr->op == IROperation::LDM){\n                /* Load memory */  \n                /* Record regs that have been dereferenced (in the address)*/\n                _update_dereferenced_regs(block->dereferenced_regs, src1);\n                // Affect lvalue\n                rvalue = mem->read(src1, (instr->dst.high-instr->dst.low+1)/8);\n                if( 
instr->dst.is_tmp()){\n                    _set_tmp_var(instr->dst.tmp(), rvalue, instr->dst.high, instr->dst.low, tmp_vars);\n                }else if( instr->dst.is_var()){\n                    regs->set(instr->dst.var(), _expand_lvalue(regs->get(instr->dst.var()), rvalue,\n                                                                    instr->dst.high, instr->dst.low));\n                }else{\n                    DELETE_ALL_OBJECTS()\n                    throw runtime_exception(\"SymbolicEngine::execute_block() got invalid dst operand type\");\n                }\n            }else if( instr->op == IROperation::BCC){\n                dst = _get_operand(instr->dst, regs, tmp_vars);\n                /* Check condition and update basic block to execute */\n                if( !dst->is_cst() || !src1->is_cst() || (src2 != nullptr && !src2->is_cst())){\n                    DELETE_ALL_OBJECTS()\n                    throw symbolic_exception(\"BCC with non constant operand(s) not supported\");\n                }\n                if( cst_sign_trunc(dst->size, dst->cst()) != 0){\n                    bblkid = src1->cst();\n                }else{\n                    bblkid = src2->cst();\n                }\n                break;\n            }else if( instr->op == IROperation::JCC ){\n                dst = _get_operand(instr->dst, regs, tmp_vars);\n                /* Set new PC */\n                if( dst->is_cst() && cst_sign_trunc(dst->size, dst->cst()) != 0){\n                    regs->set(arch->pc(), _expand_lvalue(regs->get(arch->pc()), src1,\n                                                                instr->dst.high, instr->dst.low));\n                }else{\n                    DELETE_ALL_OBJECTS()\n                    throw symbolic_exception(\"JCC with non constant or null condition not supported\");\n                }\n                /* Quit this block */\n                stop = true; // Go out of this block\n                break; // Stop executing 
instructions in the basic block\n            }else if(instr->op == IROperation::BISZ){\n                if( !src2->is_cst() ){\n                    DELETE_ALL_OBJECTS()\n                    throw symbolic_exception(\"BISZ with not constant mode not supported\");\n                }\n                rvalue = bisz((instr->dst.high-instr->dst.low)+1 , src1, cst_sign_trunc(src2->size, src2->cst()));\n                /* Affect lvalue */\n                if( instr->dst.is_tmp()){\n                    _set_tmp_var(instr->dst.tmp(), rvalue, instr->dst.high, instr->dst.low, tmp_vars);\n                }else if( instr->dst.is_var()){\n                    regs->set(instr->dst.var(), _expand_lvalue(regs->get(instr->dst.var()), rvalue,\n                                                                    instr->dst.high, instr->dst.low));\n                }else{\n                    DELETE_ALL_OBJECTS()\n                    throw runtime_exception(\"SymbolicEngine::execute_block() got invalid dst operand type\");\n                }\n            \n            }else if(instr->op == IROperation::INT){\n                cst_t num = cst_sign_trunc(instr->dst.size, _get_operand(instr->dst, regs, tmp_vars)->concretize());\n                if( num != 0x80 ){\n                    DELETE_ALL_OBJECTS()\n                    throw symbolic_exception(\"SymbolicEngine::execute_block() interruption: got unsupported INT number\");\n                }\n                block->ends_with_int80 = true;\n                /* Quit this block */\n                stop = true; // Go out of this block\n                break; // Stop executing instructions in the basic block\n            }else if(instr->op == IROperation::SYSCALL){\n                block->ends_with_syscall = true;\n                /* Quit this block */\n                stop = true; // Go out of this block\n                break; // Stop executing instructions in the basic block\n            }else{\n                DELETE_ALL_OBJECTS()\n          
      throw runtime_exception(\"SymbolicEngine::execute_block(): unknown IR instruction type\");\n            }\n            \n            /* Check for sp increment */\n            Expr sp = regs->get(arch->sp());\n            sp = simp->simplify(sp);\n            cst_t sp_inc = 0xffffffff;\n            if( sp->is_binop(Op::ADD) && sp->args[0]->is_cst() &&\n                sp->args[0]->cst() % arch->octets == 0 && sp->args[1]->is_reg(arch->sp())){\n                // sp = sp0 + cst\n                sp_inc = sp->args[0]->cst();\n            }else if( sp->is_binop(Op::ADD) && sp->args[0]->is_unop(Op::NEG) &&\n                      sp->args[0]->args[0]->is_cst() && sp->args[0]->args[0]->cst() % arch->octets == 0 &&\n                      sp->args[1]->is_reg(arch->sp())){\n                // sp = sp0 - cst\n                sp_inc = -1*sp->args[0]->args[0]->cst();\n            }else if( sp->is_var() && arch->reg_num(sp->name()) == arch->sp()){\n                // sp = sp0\n                sp_inc = 0;\n            }else{\n                // sp is unknown\n                block->known_max_sp_inc = false;\n            }\n            // Assign max sp inc\n            if( sp_inc != 0xffffffff && block->known_max_sp_inc){\n                block->max_sp_inc = block->max_sp_inc > sp_inc ? block->max_sp_inc : sp_inc;\n            }\n        }\n    }\n\n    delete simp;\n    return new Semantics(regs, mem);\n}\n"
  },
  {
    "path": "libropium/utils/utils.cpp",
    "content": "#include \"utils.hpp\"\n#include \"exception.hpp\"\n#include <iostream>\n#include <fstream>\n#include <cstdio>\n#include <memory>\n#include <stdexcept>\n#include <string>\n#include <array>\n#include <vector>\n#include <exception>\n#include <signal.h>\n\nusing std::ifstream;\nusing std::ofstream;\nusing std::ios;\nusing std::stringstream;\nusing std::vector;\n\n/* ======== Raw gadgets interface ======== */\n// Read gadgets from file\nvector<RawGadget>* raw_gadgets_from_file(string filename){\n    vector<RawGadget>* res = new vector<RawGadget>();\n    RawGadget raw;\n    bool got_addr;\n    ifstream file;\n    string line;\n    string addr_str;\n    string byte;\n    \n    file.open(filename, ios::in | ios::binary );\n    while( getline(file, line)){\n        raw = RawGadget();\n        got_addr = false;\n        addr_str = \"\";\n        byte = \"\";\n        for( char& c : line ){\n            // First the gadget address\n            if( c == '$' ){\n                try{\n                    raw.addr = std::stoi(addr_str, 0, 16);\n                    if( raw.addr == 0 )\n                        throw std::invalid_argument(\"\");\n                    got_addr = true;\n                }catch(std::invalid_argument& e){\n                    throw runtime_exception(QuickFmt() << \"raw_gadgets_from_file: error, bad address string: \" << line >> QuickFmt::to_str);\n                }\n            }else if( !got_addr){\n                addr_str += c;\n            }else{\n                byte += c;\n                if( byte.size() == 2 ){\n                    try{\n                        raw.raw += (char)(std::stoi(byte, 0, 16));\n                        byte = \"\";\n                    }catch(std::invalid_argument& e){\n                        throw runtime_exception(QuickFmt() << \"raw_gadgets_from_file: error, bad byte in: \" << line >> QuickFmt::to_str);\n                    }\n                }\n            }\n        }\n        res->push_back(raw);\n  
  }\n    \n    file.close();\n    return res;\n}\n\n\n// Write gadgets to file from ROPgadget output\nvoid split(const std::string& str, vector<string>& cont, char delim = ' ')\n{\n    std::size_t current, previous = 0;\n    current = str.find(delim);\n    while (current != std::string::npos) {\n        cont.push_back(str.substr(previous, current - previous));\n        previous = current + 1;\n        current = str.find(delim, previous);\n    }\n    cont.push_back(str.substr(previous, current - previous));\n}\n\nbool ropgadget_to_file(string out, string ropgadget_out, string bin){\n    stringstream cmd;\n    ofstream out_file;\n    ifstream ropgadget_file;\n    string line;\n\n    out_file.open(out, ios::out);\n\n    cmd << \"ROPgadget --binary \" << bin << \" --dump --all --depth 15 > \" << ropgadget_out << std::endl; \n    try{\n\n        FILE* pipe = popen(cmd.str().c_str(), \"w\");\n        string addr_str, raw_str;\n        stringstream ss;\n        vector<string> splited;\n\n\n        if (!pipe) {\n            throw std::runtime_error(\"popen() failed!\");\n        }\n\n        pclose(pipe);\n        ropgadget_file.open(ropgadget_out, ios::in);\n        \n        while( std::getline(ropgadget_file, line)){\n            splited.clear();\n            split(line, splited);\n\n            // Get address string\n            if( splited.size() > 3 ){\n                addr_str = splited[0];\n            }else{\n                continue;\n            }\n            if( addr_str.substr(0, 2) != \"0x\" ){\n                continue;\n            }\n            // Get raw string\n            raw_str = splited.back();\n            if( raw_str.back() != '\\n' )\n                raw_str += '\\n';\n\n            // Write them to file\n            out_file << addr_str << \"$\" << raw_str;\n        }\n\n    }catch(std::runtime_error& e){\n        return false;\n    }\n\n    out_file.close();\n    ropgadget_file.close();\n    return true;\n}\n\n\n/* ========== Printing stuff 
============== */\n\n// Colors \nstring g_ERROR_COLOR_ANSI = DEFAULT_ERROR_COLOR_ANSI;\nstring g_BOLD_COLOR_ANSI = DEFAULT_BOLD_COLOR_ANSI;\nstring g_SPECIAL_COLOR_ANSI = DEFAULT_SPECIAL_COLOR_ANSI;\nstring g_PAYLOAD_COLOR_ANSI = DEFAULT_PAYLOAD_COLOR_ANSI;\nstring g_EXPLOIT_DESCRIPTION_ANSI = DEFAULT_EXPLOIT_DESCRIPTION_ANSI;\nstring g_END_COLOR_ANSI = DEFAULT_END_COLOR_ANSI ;\n\n// String coloration \nstring str_bold(string s){\n    return g_BOLD_COLOR_ANSI + s + g_END_COLOR_ANSI; \n}\n\nstring str_special(string s){\n    return g_SPECIAL_COLOR_ANSI + s + g_END_COLOR_ANSI; \n}\n\nstring value_to_hex_str(int octets, addr_t addr){\n    char res[32], format[32];\n    // Get format (32 or 64 bits)\n    snprintf(format, sizeof(format), \"%%0%02dllx\", octets*2);\n    // Write hex bytes \n    snprintf(res, sizeof(res), format, addr);\n    return \"0x\"+string(res);\n}\n\nvoid disable_colors(){\n    g_ERROR_COLOR_ANSI = \"\";\n    g_BOLD_COLOR_ANSI = \"\";\n    g_SPECIAL_COLOR_ANSI = \"\";\n    g_PAYLOAD_COLOR_ANSI = \"\";\n    g_EXPLOIT_DESCRIPTION_ANSI = \"\";\n    g_END_COLOR_ANSI = \"\";\n}\n\nvoid enable_colors(){\n    g_ERROR_COLOR_ANSI = DEFAULT_ERROR_COLOR_ANSI;\n    g_BOLD_COLOR_ANSI = DEFAULT_BOLD_COLOR_ANSI;\n    g_SPECIAL_COLOR_ANSI = DEFAULT_SPECIAL_COLOR_ANSI;\n    g_PAYLOAD_COLOR_ANSI = DEFAULT_PAYLOAD_COLOR_ANSI;\n    g_EXPLOIT_DESCRIPTION_ANSI = DEFAULT_EXPLOIT_DESCRIPTION_ANSI;\n    g_END_COLOR_ANSI = DEFAULT_END_COLOR_ANSI ;    \n}\n\n\n\n/* ========= Catching ctrl+C ============= */\nstruct sigaction g_ropium_sigint_handler;\nstruct sigaction g_ropium_prev_sigint_handler;\nbool g_ropium_sigint_flag = false;\n\nvoid ropium_sigint_handler(int s){\n    g_ropium_sigint_flag = true;\n}\n\nvoid set_sigint_handler(){\n    g_ropium_sigint_handler.sa_handler = ropium_sigint_handler;\n    sigemptyset(&g_ropium_sigint_handler.sa_mask);\n    g_ropium_sigint_handler.sa_flags = 0;\n\n    sigaction(SIGINT, &g_ropium_sigint_handler, 
&g_ropium_prev_sigint_handler);\n}\n\nvoid unset_signint_handler(){\n    sigaction(SIGINT, &g_ropium_prev_sigint_handler, nullptr);\n}\n\nbool is_pending_sigint(){\n    return g_ropium_sigint_flag;\n}\n\nvoid notify_sigint_handled(){\n    g_ropium_sigint_flag = false;\n}\n"
  },
  {
    "path": "tests/ressources/gadgets.txt",
    "content": "0xaaaaaaa$505BFFE3\n0x12345678$89D8C3\n"
  },
  {
    "path": "tests/test_all.cpp",
    "content": "#include \"exception.hpp\"\n#include <string>\n#include <cstring>\n#include <iostream>\n#include <exception>\n\nusing std::cout;\nusing std::endl;\nusing std::string;\n\nvoid test_expression();\nvoid test_simplification();\nvoid test_ir();\nvoid test_database();\nvoid test_gadgets();\nvoid test_strategy();\nvoid test_il();\nvoid test_compiler();\n\nint main(int argc, char ** argv){\n    string bold = \"\\033[1m\";\n    string def = \"\\033[0m\";\n    string red = \"\\033[1;31m\";\n    string green = \"\\033[1;32m\";\n    \n    cout << bold << \"\\nRunnning ROPium unit-tests\" << def << endl\n                 <<   \"==========================\" << endl << endl;\n     for(int i = 0; i < 1; i++){\n        try{\n            if( argc == 1 ){\n            /* If no args specified, test all */\n                test_expression();\n                test_simplification();\n                test_ir();\n                test_gadgets();\n                test_database();\n                test_strategy();\n                test_il();\n                test_compiler();\n            }else{\n            /* Iterate through all options */\n                for( int i = 1; i < argc; i++){\n                    if( !strcmp(argv[i], \"expr\"))\n                        test_expression();\n                    else if (!strcmp(argv[i], \"simp\"))\n                        test_simplification();\n                    else if (!strcmp(argv[i], \"ir\"))\n                        test_ir();\n                    else if( !strcmp(argv[i], \"db\"))\n                        test_database();\n                    else if( !strcmp(argv[i], \"gadgets\"))\n                        test_gadgets();\n                    else if( !strcmp(argv[i], \"strategy\"))\n                        test_strategy();\n                    else if( !strcmp(argv[i], \"il\"))\n                        test_il();\n                    else if( !strcmp(argv[i], \"compiler\"))\n                        test_compiler();\n        
            else\n                        std::cout << \"[\" << red << \"!\" << def << \"] Skipping unknown test: \" << argv[i] << std::endl;\n                }\n            }\n        }catch(test_exception& e){\n            cout << red << \"Fatal: Unit test failed\" << def << endl << endl;\n            return 1; \n        }\n    }\n    cout << endl;\n    return 0;\n}\n"
  },
  {
    "path": "tests/test_compiler.cpp",
    "content": "#include \"compiler.hpp\"\n#include \"arch.hpp\"\n#include \"exception.hpp\"\n#include <cassert>\n#include <iostream>\n#include <string>\n#include <sstream>\n#include <iomanip>\n\nusing std::cout;\nusing std::endl; \nusing std::string;\n\nnamespace test{\n    namespace compiler{\n        unsigned int _assert_ropchain(ROPChain* ropchain, const string& msg){\n            if( ropchain == nullptr){\n                cout << \"\\nFail: \" << msg << endl << std::flush; \n                throw test_exception();\n            }\n            delete ropchain;\n            return 1; \n        }\n        \n        unsigned int _assert_no_ropchain(ROPChain* ropchain, const string& msg){\n            if( ropchain != nullptr){\n                cout << \"\\nFail: \" << msg << endl << std::flush; \n                throw test_exception();\n            }\n            return 1; \n        }\n\n        unsigned int direct_match(){\n            unsigned int nb = 0;\n            ArchX86 arch;\n            GadgetDB db;\n            ROPCompiler comp = ROPCompiler(&arch, &db);\n            ROPChain* ropchain;\n\n            // Available gadgets\n            vector<RawGadget> raw;\n            raw.push_back(RawGadget(string(\"\\x89\\xf9\\xbb\\x01\\x00\\x00\\x00\\xc3\", 8), 1)); // mov ecx, edi; mov ebx, 1; ret\n            raw.push_back(RawGadget(string(\"\\x89\\xC8\\xC3\", 3), 2)); // mov eax, ecx; ret\n            raw.push_back(RawGadget(string(\"\\x89\\xC3\\xC3\", 3), 3)); // mov ebx, eax; ret\n            raw.push_back(RawGadget(string(\"\\x01\\xf0\\x89\\xc3\\xc3\", 5), 4)); // add eax, esp; mov ebx, eax; ret\n            raw.push_back(RawGadget(string(\"\\xbb\\x04\\x00\\x00\\x00\\xc3\", 6), 5)); // mov ebx, 4; ret\n            raw.push_back(RawGadget(string(\"\\x83\\xc0\\x04\\x89\\xc3\\xc3\", 6), 6)); // add eax, 4; mov ebx, eax; ret\n            raw.push_back(RawGadget(string(\"\\x8b\\x59\\xf7\\x89\\xd8\\xc3\", 6), 7)); // mov ebx, [ecx-9]; mov eax, ebx; ret\n            
raw.push_back(RawGadget(string(\"\\x03\\x39\\xc3\", 3), 8)); // add edi, [ecx]; ret\n            raw.push_back(RawGadget(string(\"\\xb9\\x0a\\x00\\x00\\x00\\xc3\", 6), 9)); // mov ecx, 10; ret\n            raw.push_back(RawGadget(string(\"\\x89\\x0f\\x89\\x5e\\xfd\\xc3\", 6), 10)); // mov [edi], ecx; mov [esi-3], ebx; ret\n            raw.push_back(RawGadget(string(\"\\xbe\\x16\\x00\\x00\\x00\\xc3\", 6), 11)); // mov esi, 22; ret\n            raw.push_back(RawGadget(string(\"\\xbf\\x78\\x56\\x34\\x12\\xc3\", 6), 12)); // mov edi, 0x12345678; ret\n            raw.push_back(RawGadget(string(\"\\x01\\x21\\xc3\", 3), 13)); // add [ecx], esp; ret\n            raw.push_back(RawGadget(string(\"\\x33\\x79\\xf6\\xc3\", 4), 14)); // xor edi, [ecx-10]; ret\n            raw.push_back(RawGadget(string(\"\\x83\\xc9\\xff\\xc3\", 4), 15)); // or ecx, 0xffffffff; ret\n            raw.push_back(RawGadget(string(\"\\x21\\x49\\xf7\\xc3\", 4), 16)); // and [ecx-9], ecx; ret\n            raw.push_back(RawGadget(string(\"\\x01\\x1E\\xC3\", 3), 17)); // add [esi], ebx; ret\n\n            db.analyse_raw_gadgets(raw, &arch);\n\n            // Test basic queries\n            ropchain = comp.compile(\"eax = ecx\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\" ebx = 4\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\" ebx = eax + 4\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\" ebx = eax + esi \");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\" ebx = [ ecx - 0x9] \");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\" ebx = [ 1] \");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\" [edi] 
= ecx\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\" [ esi-   0x3] = ebx\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\" [19] = ebx\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\" [0x12345678] = ecx\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\" edi += [ecx]\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\" edi ^= [0]\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\" [ecx+0x000 ] += esp  \\t\\t\\n\\t  \");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\" ecx = -1\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\" [22] += 4\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n\n            return nb;\n        }\n\n        unsigned int indirect_match(){\n            unsigned int nb = 0;\n            ArchX86 arch;\n            GadgetDB db;\n            ROPCompiler comp = ROPCompiler(&arch, &db);\n            ROPChain* ropchain;\n            Constraint constr;\n            constr.bad_bytes.add_bad_byte(0xff);\n\n            // Available gadgets\n            vector<RawGadget> raw;\n            raw.push_back(RawGadget(string(\"\\x89\\xf9\\xbb\\x01\\x00\\x00\\x00\\xc3\", 8), 1)); // mov ecx, edi; mov ebx, 1; ret\n            raw.push_back(RawGadget(string(\"\\x89\\xC8\\xC3\", 3), 2)); // mov eax, ecx; ret\n            raw.push_back(RawGadget(string(\"\\x89\\xC3\\xC3\", 3), 3)); // mov ebx, eax; ret\n            raw.push_back(RawGadget(string(\"\\xb9\\xad\\xde\\x00\\x00\\xc3\", 6), 4)); // 
mov ecx, 0xdead; ret\n            raw.push_back(RawGadget(string(\"\\x5f\\x5e\\x59\\xc3\", 4), 5)); // pop edi; pop esi; pop ecx; ret\n            raw.push_back(RawGadget(string(\"\\x89\\xE8\\xFF\\xE6\", 4), 6)); // mov eax, ebp; jmp esi\n            raw.push_back(RawGadget(string(\"\\x89\\xF1\\xFF\\xE0\", 4), 7)); // mov ecx, esi; jmp eax\n            raw.push_back(RawGadget(string(\"\\x5A\\x59\\xC3\", 3), 8)); // pop edx; pop ecx; ret\n            raw.push_back(RawGadget(string(\"\\x8B\\x40\\x08\\xC3\", 4), 9)); // mov eax, [eax + 8]; ret\n            raw.push_back(RawGadget(string(\"\\x8D\\x4B\\x08\\xC3\", 4), 10)); // lea ecx, [ebx + 8]; ret\n            raw.push_back(RawGadget(string(\"\\x8D\\x40\\x20\\xFF\\xE1\", 5), 11)); // lea eax, [eax + 32]; jmp ecx;\n            raw.push_back(RawGadget(string(\"\\x89\\x43\\x08\\xC3\", 4), 12)); // mov [ebx + 8], eax; ret\n\n            db.analyse_raw_gadgets(raw, &arch);\n\n            // Test mov_reg_transitivity\n            ropchain = comp.compile(\"eax = edi\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\"ebx = edi\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n\n            // Test mov_cst_transitivity\n            ropchain = comp.compile(\"eax = 0xdead\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\"ebx = 0xdead\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            \n            // Test mov_cst pop\n            ropchain = comp.compile(\" edi =   -2\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\" eax = 0x12345678  \");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            \n            // Test generic adjust jmp\n            ropchain = comp.compile(\" ebx =  ebp\");\n            nb += 
_assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\" eax =  esi\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            \n            // Test adjust load\n            ropchain = comp.compile(\" eax =  [ebx+16]\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            ropchain = comp.compile(\" eax =  [eax+40]\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n\n            // Test src transitivity\n            ropchain = comp.compile(\" [ebx+8] =  ebp\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            \n            // Test adjust store\n            ropchain = comp.compile(\" [eax+8] =  ecx\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n\n            return nb;\n        }\n        \n        unsigned int store_string(){\n            unsigned int nb = 0;\n            \n            ArchX86 arch;\n            GadgetDB db;\n            ROPCompiler comp = ROPCompiler(&arch, &db);\n            ROPChain* ropchain;\n            Constraint constr;\n            constr.bad_bytes.add_bad_byte(0xff);\n\n            // Available gadgets\n            vector<RawGadget> raw;\n            raw.push_back(RawGadget(string(\"\\x89\\xf9\\xbb\\x01\\x00\\x00\\x00\\xc3\", 8), 1)); // mov ecx, edi; mov ebx, 1; ret\n            raw.push_back(RawGadget(string(\"\\x89\\xC8\\xC3\", 3), 2)); // mov eax, ecx; ret\n            raw.push_back(RawGadget(string(\"\\x89\\xC3\\xC3\", 3), 3)); // mov ebx, eax; ret\n            raw.push_back(RawGadget(string(\"\\xb9\\xad\\xde\\x00\\x00\\xc3\", 6), 4)); // mov ecx, 0xdead; ret\n            raw.push_back(RawGadget(string(\"\\x5f\\x5e\\x59\\xc3\", 4), 5)); // pop edi; pop esi; pop ecx; ret\n            raw.push_back(RawGadget(string(\"\\x89\\xE8\\xFF\\xE6\", 4), 6)); // mov eax, ebp; jmp esi\n            
raw.push_back(RawGadget(string(\"\\x89\\xF1\\xFF\\xE0\", 4), 7)); // mov ecx, esi; jmp eax\n            raw.push_back(RawGadget(string(\"\\x5A\\x59\\xC3\", 3), 8)); // pop edx; pop ecx; ret\n            raw.push_back(RawGadget(string(\"\\x8B\\x40\\x08\\xC3\", 4), 9)); // mov eax, [eax + 8]; ret\n            raw.push_back(RawGadget(string(\"\\x8D\\x4B\\x08\\xC3\", 4), 10)); // lea ecx, [ebx + 8]; ret\n            raw.push_back(RawGadget(string(\"\\x8D\\x40\\x20\\xFF\\xE1\", 5), 11)); // lea eax, [eax + 32]; jmp ecx;\n            raw.push_back(RawGadget(string(\"\\x89\\x43\\x08\\xC3\", 4), 12)); // mov [ebx + 8], eax; ret\n\n            db.analyse_raw_gadgets(raw, &arch);\n\n            // Test adjust store\n            ropchain = comp.compile(\" [0x1234] =  'lala'\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n            \n            ropchain = comp.compile(\" [0x1234] =  'lalatotoo\\\\x00'\");\n            nb += _assert_ropchain(ropchain, \"Failed to find ropchain\");\n\n            return nb;\n        }\n        \n        unsigned int incorrect_match(){\n            unsigned int nb = 0;\n            ArchX86 arch;\n            GadgetDB db;\n            ROPCompiler comp = ROPCompiler(&arch, &db);\n            ROPChain* ropchain;\n            Constraint constr;\n            constr.bad_bytes.add_bad_byte(0xff);\n\n            // Test when adjust gadget clobbers reg that must be set\n            // Here gadget 2 and 3 both modify ecx\n            vector<RawGadget> raw;\n            raw.push_back(RawGadget(string(\"\\x89\\xF1\\xFF\\xE0\", 4), 1)); // mov ecx, esi; jmp eax\n            raw.push_back(RawGadget(string(\"\\x59\\xC3\", 2), 2)); // pop ecx; ret\n            raw.push_back(RawGadget(string(\"\\x58\\x59\\xC3\", 3), 3)); // pop eax; pop ecx; ret\n            db.analyse_raw_gadgets(raw, &arch);\n            ropchain = comp.compile(\" ecx =  esi\");\n            nb += _assert_no_ropchain(ropchain, \"Found ropchain but no ropchain 
should exist\");\n            \n            // Test when adjust gadget clobbers input register\n            // Here gadget 2 can set eax but modifies esi\n            db.clear();\n            raw.clear();\n            raw.push_back(RawGadget(string(\"\\x89\\xF1\\xFF\\xE0\", 4), 1)); // mov ecx, esi; jmp eax\n            raw.push_back(RawGadget(string(\"\\x5E\\x58\\xC3\", 3), 2)); // pop esi; pop eax; ret\n            raw.push_back(RawGadget(string(\"\\xC3\", 1), 3)); // ret\n            db.analyse_raw_gadgets(raw, &arch);\n            ropchain = comp.compile(\" ecx =  esi\");\n            nb += _assert_no_ropchain(ropchain, \"Found ropchain but no ropchain should exist\");\n\n            return nb;\n        }\n        \n        unsigned int function_call_x86(){\n            unsigned int nb = 0;\n            ArchX86 arch;\n            GadgetDB db;\n            ROPCompiler comp = ROPCompiler(&arch, &db);\n            ROPChain* ropchain;\n            Constraint constr;\n            constr.bad_bytes.add_bad_byte(0xff);\n            // Test when adjust gadget clobbers reg that must be set\n            // Here gadget 2 and 3 both modify ecx\n            vector<RawGadget> raw;\n            raw.push_back(RawGadget(string(\"\\x58\\xFF\\xE0\", 3), 1)); // pop eax; jmp eax\n            raw.push_back(RawGadget(string(\"\\xC3\", 1), 2)); // ret\n            raw.push_back(RawGadget(string(\"\\x59\\xC3\", 2), 3)); // pop ecx; ret\n            raw.push_back(RawGadget(string(\"\\x83\\xC4\\x0C\\xC3\", 4), 4)); // add esp, 12; ret\n            db.analyse_raw_gadgets(raw, &arch);\n\n            // X86 CDECL ABI\n            ropchain = comp.compile(\" 0x1234(42)\", nullptr, ABI::X86_CDECL);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to call function\");\n\n            ropchain = comp.compile(\" 0x12345678(42, -1, 43)\", nullptr, ABI::X86_CDECL);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to call function\");\n\n            
ropchain = comp.compile(\" 0()\", nullptr, ABI::X86_CDECL);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to call function\");\n            \n            // X86 STDCALL ABI\n            ropchain = comp.compile(\" 0x1234(42)\", nullptr, ABI::X86_STDCALL);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to call function\");\n\n            ropchain = comp.compile(\" 0x12345678(42, -1, 43)\", nullptr, ABI::X86_STDCALL);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to call function\");\n\n            ropchain = comp.compile(\" 0()\", nullptr, ABI::X86_STDCALL);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to call function\");\n            \n            return nb;\n        }\n\n        unsigned int function_call_x64(){\n            unsigned int nb = 0;\n            ArchX64 arch;\n            GadgetDB db;\n            ROPCompiler comp = ROPCompiler(&arch, &db);\n            ROPChain* ropchain;\n            Constraint constr;\n            constr.bad_bytes.add_bad_byte(0xff);\n            // Test when adjust gadget clobbers reg that must be set\n            // Here gadget 2 and 3 both modify ecx\n            vector<RawGadget> raw;\n            raw.push_back(RawGadget(string(\"\\x58\\xFF\\xE0\", 3), 1)); // pop rax; jmp rax\n            raw.push_back(RawGadget(string(\"\\xC3\", 1), 2)); // ret\n            raw.push_back(RawGadget(string(\"\\x5F\\xC3\", 2), 3)); // pop rdi; ret\n            raw.push_back(RawGadget(string(\"\\x48\\x83\\xC4\\x08\\xC3\", 5), 4)); // add rsp, 8; ret\n            raw.push_back(RawGadget(string(\"\\x5E\\xC3\", 2), 5)); // pop rsi; ret\n            raw.push_back(RawGadget(string(\"\\x5A\\xC3\", 2), 6)); // pop rdx; ret\n            raw.push_back(RawGadget(string(\"\\x59\\xC3\", 2), 7)); // pop rcx; ret\n            raw.push_back(RawGadget(string(\"\\x41\\x58\\xc3\", 3), 8)); // pop r8; ret\n            
raw.push_back(RawGadget(string(\"\\x41\\x59\\x59\\xc3\", 4), 9)); // pop r9; pop rcx; ret\n            raw.push_back(RawGadget(string(\"\\x48\\x89\\xef\\xc3\",4), 10)); // mov rdi, rbp; ret\n            raw.push_back(RawGadget(string(\"\\x48\\x83\\xC4\\x18\\xC3\", 5), 11)); // add rsp, 24; ret\n            raw.push_back(RawGadget(string(\"\\x49\\x89\\xd1\\xc3\",4), 12)); // mov r9, rdx; ret\n            raw.push_back(RawGadget(string(\"\\x48\\x83\\xC4\\x10\\xC3\", 5), 13)); // add rsp, 16; ret\n            db.analyse_raw_gadgets(raw, &arch);\n\n            // X64 SYSTEM V\n            ropchain = comp.compile(\" 0x1234(42)\", nullptr, ABI::X64_SYSTEM_V);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to call function\");\n            \n            ropchain = comp.compile(\" 0x1234(1, 2, 3, 4)\", nullptr, ABI::X64_SYSTEM_V);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to call function\");\n            \n            ropchain = comp.compile(\" 0x1234(rbp, 2, 3, 4, 5, 6)\", nullptr, ABI::X64_SYSTEM_V);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to call function\");\n            \n            ropchain = comp.compile(\" 0x1234(rbp, 2, 3, 4, 5, 6, 7, 8)\", nullptr, ABI::X64_SYSTEM_V);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to call function\");\n            \n            ropchain = comp.compile(\" 0x1234(rbp, 2, 3, 4, 5, 6, 7, 8, 9)\", nullptr, ABI::X64_SYSTEM_V);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to call function\");\n\n            // X64 Microsoft\n            ropchain = comp.compile(\" 0x1234(42)\", nullptr, ABI::X64_MS);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to call function\");\n            \n            ropchain = comp.compile(\" 0x1234(1, 2, 3, rdx)\", nullptr, ABI::X64_MS);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to call function\");\n            \n   
         ropchain = comp.compile(\" 0x1234(1, 2, 3, 4, 5, 6)\", nullptr, ABI::X64_MS);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to call function\");\n            \n            ropchain = comp.compile(\" 0x1234(1, 2, 3, rdx, 5, 6, 7)\", nullptr, ABI::X64_MS);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to call function\");\n\n            return nb;\n        }\n\n        unsigned int syscall_x86(){\n            unsigned int nb = 0;\n            ArchX86 arch;\n            GadgetDB db;\n            ROPCompiler comp = ROPCompiler(&arch, &db);\n            ROPChain* ropchain;\n            Constraint constr;\n            constr.bad_bytes.add_bad_byte(0xff);\n            vector<RawGadget> raw;\n            raw.push_back(RawGadget(string(\"\\x58\\xC3\", 2), 1)); // pop eax; ret\n            raw.push_back(RawGadget(string(\"\\x5B\\xC3\", 2), 2)); // pop ebx; ret\n            raw.push_back(RawGadget(string(\"\\x83\\xC5\\x20\\x0F\\x34\", 5), 3)); // add ebp, 32; sysenter\n            raw.push_back(RawGadget(string(\"\\x59\\xC3\", 2), 4)); // pop ecx; ret\n            raw.push_back(RawGadget(string(\"\\x59\\x5A\\xC3\", 3), 5)); // pop ecx; pop edx; ret\n            raw.push_back(RawGadget(string(\"\\x58\\x89\\xC6\\xC3\", 4), 6)); // pop eax; mov esi, eax; ret\n            raw.push_back(RawGadget(string(\"\\x89\\xDA\\xC3\", 3), 7)); // mov edx, ebx; ret\n            db.analyse_raw_gadgets(raw, &arch);\n\n            // X86 Linux\n            ropchain = comp.compile(\" sys_exit(1)\", nullptr, ABI::NONE, System::LINUX);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to make syscall\");\n\n            ropchain = comp.compile(\" sys_execve( 3, 2, 1)\", nullptr, ABI::NONE, System::LINUX);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to make syscall\");\n\n            ropchain = comp.compile(\" sys_execve( 3, 2, ebx)\", nullptr, ABI::NONE, System::LINUX);\n            nb += 
_assert_ropchain(ropchain, \"Couldn't build ropchain to make syscall\");\n\n            ropchain = comp.compile(\" sys_ptrace( 3, 2, 1, 2)\", nullptr, ABI::NONE, System::LINUX);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to make syscall\");\n\n            ropchain = comp.compile(\"  sys_123(1, 2, 3) \", nullptr, ABI::NONE, System::LINUX);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to make syscall\");\n\n            return nb;\n        }\n        \n        unsigned int syscall_x64(){\n            unsigned int nb = 0;\n            ArchX64 arch;\n            GadgetDB db;\n            ROPCompiler comp = ROPCompiler(&arch, &db);\n            ROPChain* ropchain;\n            Constraint constr;\n            constr.bad_bytes.add_bad_byte(0xff);\n            vector<RawGadget> raw;\n            raw.push_back(RawGadget(string(\"\\x58\\xC3\", 2), 1)); // pop rax; ret\n            raw.push_back(RawGadget(string(\"\\x5F\\xC3\", 2), 2)); // pop rdi; ret\n            raw.push_back(RawGadget(string(\"\\x83\\xC5\\x20\\x0F\\x05\", 5), 3)); // add ebp, 32; syscall\n            raw.push_back(RawGadget(string(\"\\x5E\\xC3\", 2), 4)); // pop rsi; ret\n            raw.push_back(RawGadget(string(\"\\x59\\x5A\\xC3\", 3), 5)); // pop rcx; pop rdx; ret\n            raw.push_back(RawGadget(string(\"\\x58\\x48\\x89\\xC6\\xC3\", 5), 6)); // pop rax; mov rsi, rax; ret\n            raw.push_back(RawGadget(string(\"\\x48\\x89\\xF2\\xC3\", 4), 7)); // mov rdx, rsi; ret\n            db.analyse_raw_gadgets(raw, &arch);\n\n            // X64 Linux\n            ropchain = comp.compile(\" sys_exit(1)\", nullptr, ABI::NONE, System::LINUX);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to make syscall\");\n            \n            ropchain = comp.compile(\" sys_execve(1, 2, 3)\", nullptr, ABI::NONE, System::LINUX);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to make syscall\");\n            \n   
         ropchain = comp.compile(\" sys_execve(1, 2, rsi)\", nullptr, ABI::NONE, System::LINUX);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to make syscall\");\n\n            ropchain = comp.compile(\" sys_0x42(1, 2, rsi)\", nullptr, ABI::NONE, System::LINUX);\n            nb += _assert_ropchain(ropchain, \"Couldn't build ropchain to make syscall\");\n\n            return nb;\n        }\n        \n    }\n}\n\nusing namespace test::compiler; \n// All unit tests \nvoid test_compiler(){\n    unsigned int total = 0;\n    string green = \"\\033[1;32m\";\n    string def = \"\\033[0m\";\n    string bold = \"\\033[1m\";\n    \n    // Start testing \n    cout << bold << \"[\" << green << \"+\" << def << bold << \"]\" << def << std::left << std::setw(34) << \" Testing ROP compiler... \" << std::flush;  \n    total += direct_match();\n    total += indirect_match();\n    total += function_call_x86();\n    total += function_call_x64();\n    total += syscall_x86();\n    total += syscall_x64();\n    total += store_string();\n    total += incorrect_match();\n    // Return res\n    cout << \"\\t\" << total << \"/\" << total << green << \"\\t\\tOK\" << def << endl;\n}\n"
  },
  {
    "path": "tests/test_database.cpp",
    "content": "#include \"database.hpp\"\n#include \"exception.hpp\"\n#include <string>\n#include <sstream>\n#include <tuple>\n#include <iostream>\n#include <iomanip>\n#include <algorithm>\n\nusing std::cout;\nusing std::endl; \nusing std::string;\nusing std::make_tuple;\nusing std::tuple;\n\nnamespace test{\n    namespace database{\n        unsigned int _assert(bool val, const string& msg){\n            if( !val){\n                cout << \"\\nFail: \" << msg << endl << std::flush; \n                throw test_exception();\n            }\n            return 1; \n        }\n        \n        unsigned int base_db(){\n            BaseDB<tuple<int, int>> db;\n            int nb = 0;\n            Gadget *g1 = new Gadget(), *g2 = new Gadget();\n            vector<Gadget*> all;\n            g1->id = 0; g2->id = 1;\n            all.push_back(g1);\n            all.push_back(g2);\n            \n            db.add(make_tuple(1,2), g1);\n            nb += _assert(db.get(make_tuple(1,2))[0] == g1, \"BaseDB, failed to add then get gadget\");\n            db.add(make_tuple(1,4456), g2);\n            nb += _assert(db.get(make_tuple(1,4456))[0] == g2, \"BaseDB, failed to add then get gadget\");\n\n            delete g1; delete g2;\n            return nb; \n        }\n        \n        unsigned int _assert_db(addr_t addr, const vector<Gadget*>& list){\n            for( Gadget* g : list ){\n                if( std::find(g->addresses.begin(), g->addresses.end(), addr) != g->addresses.end() )\n                    return 1;\n            }\n            cout << \"\\nFail: \" << \"GadgetDB: failed to classify/return gadget correctly\" << endl << std::flush; \n                throw test_exception();\n        }\n        \n        unsigned int classification(){\n            unsigned int nb = 0;\n            Arch* arch = new ArchX86();\n            GadgetDB db;\n\n            vector<RawGadget> raw;\n            raw.push_back(RawGadget(string(\"\\xb8\\x03\\x00\\x00\\x00\\xc3\", 6), 0)); // 
mov eax, 3; ret\n            raw.push_back(RawGadget(string(\"\\x89\\xf9\\xbb\\x01\\x00\\x00\\x00\\xc3\", 8), 1)); // mov ecx, edi; mov ebx, 1; ret\n            raw.push_back(RawGadget(string(\"\\xb8\\x03\\x00\\x00\\x00\\xc3\", 6), 2)); // mov eax, 3; ret\n            raw.push_back(RawGadget(string(\"\\x83\\xc0\\x02\\x89\\xc6\\xc3\", 6), 3)); // add eax, 2; mov esi, eax; ret\n            raw.push_back(RawGadget(string(\"\\x81\\xea\\x34\\x12\\x00\\x00\\xc3\", 7), 4)); // sub edx, 0x1234; ret\n            raw.push_back(RawGadget(string(\"\\x01\\xe5\\xc3\", 3), 5)); // add ebp, esp; ret\n            raw.push_back(RawGadget(string(\"\\x58\\x5e\\xc3\", 3), 6)); // pop eax; pop esi; ret\n            raw.push_back(RawGadget(string(\"\\x8b\\x59\\xf7\\x89\\xd8\\xc3\", 6), 7)); // mov ebx, [ecx-9]; mov eax, ebx; ret\n            raw.push_back(RawGadget(string(\"\\x03\\x39\\xc3\", 3), 8)); // add edi, [ecx]; ret\n            raw.push_back(RawGadget(string(\"\\x33\\x79\\xf6\\xc3\", 4), 9)); // xor edi, [ecx-10]; ret\n            raw.push_back(RawGadget(string(\"\\xff\\xe0\", 2), 10)); // jmp eax;\n            raw.push_back(RawGadget(string(\"\\x89\\x0f\\x89\\x5e\\xfd\\xc3\", 6), 11)); // mov [edi], ecx; mov [esi-3], ebx; ret\n            raw.push_back(RawGadget(string(\"\\x01\\x21\\xc3\", 3), 12)); // add [ecx], esp; ret\n            raw.push_back(RawGadget(string(\"\\x21\\x49\\xf7\\xc3\", 4), 13)); // and [ecx-9], ecx; ret\n            raw.push_back(RawGadget(string(\"\\x83\\xC0\\x03\\xCD\\x80\", 5), 14)); // add eax, 3; int 0x80\n            raw.push_back(RawGadget(string(\"\\x83\\xC5\\x20\\x0F\\x34\", 5), 15)); // add ebp, 32; sysenter\n\n            db.analyse_raw_gadgets(raw, arch);\n\n            // Test gadget classification\n            nb += _assert_db(0, db.get_mov_cst(X86_EAX, 3));\n            nb += _assert_db(1, db.get_mov_cst(X86_EBX, 1));\n            nb += _assert_db(1, db.get_mov_reg(X86_ECX, X86_EDI));\n            nb += _assert_db(2, db.get_mov_cst(X86_EAX, 
3));\n            nb += _assert_db(3, db.get_amov_cst(X86_EAX, X86_EAX, Op::ADD, 2));\n            nb += _assert_db(3, db.get_amov_cst(X86_ESI, X86_EAX, Op::ADD, 2));\n            nb += _assert_db(4, db.get_amov_cst(X86_EDX, X86_EDX, Op::ADD, -0x1234));\n            nb += _assert_db(5, db.get_amov_reg(X86_EBP, X86_ESP, Op::ADD, X86_EBP));\n            nb += _assert_db(6, db.get_load(X86_EAX, X86_ESP, 0));\n            nb += _assert_db(6, db.get_load(X86_ESI, X86_ESP, 4));\n            nb += _assert_db(7, db.get_load(X86_EBX, X86_ECX, -9));\n            nb += _assert_db(7, db.get_load(X86_EAX, X86_ECX, -9));\n            nb += _assert_db(8, db.get_aload(X86_EDI, Op::ADD, X86_ECX, 0));\n            nb += _assert_db(9, db.get_aload(X86_EDI, Op::XOR, X86_ECX, -10));\n            nb += _assert_db(10, db.get_jmp(X86_EAX));\n            nb += _assert_db(10, db.get_mov_reg(X86_EIP, X86_EAX));\n            nb += _assert_db(11, db.get_store(X86_EDI, 0, X86_ECX));\n            nb += _assert_db(11, db.get_store(X86_ESI, -3, X86_EBX));\n            nb += _assert_db(12, db.get_astore(X86_ECX, 0, Op::ADD, X86_ESP));\n            nb += _assert_db(13, db.get_astore(X86_ECX, -9, Op::AND, X86_ECX));\n            nb += _assert_db(14, db.get_int80());\n            nb += _assert_db(15, db.get_syscall());\n\n            delete arch;\n            return nb;\n        }\n        \n        unsigned int classification_x64(){\n            unsigned int nb = 0;\n            Arch* arch = new ArchX64();\n            GadgetDB db;\n\n            vector<RawGadget> raw;\n            raw.push_back(RawGadget(string(\"\\x83\\xC5\\x20\\x0F\\x05\", 5), 1)); // add ebp, 32; syscall\n\n            db.analyse_raw_gadgets(raw, arch);\n\n            // Test gadget classification\n            nb += _assert_db(1, db.get_syscall());\n\n            delete arch;\n            return nb;\n        }\n\n    }\n}\n\nusing namespace test::database; \n// All unit tests \nvoid test_database(){\n    unsigned int total = 0;\n 
   string green = \"\\033[1;32m\";\n    string def = \"\\033[0m\";\n    string bold = \"\\033[1m\";\n    \n    // Start testing \n    cout << bold << \"[\" << green << \"+\" << def << bold << \"]\" << def << std::left << std::setw(34) << \" Testing gadget database... \" << std::flush;  \n    total += base_db();\n    total += classification();\n    total += classification_x64();\n    // Return res\n    cout << \"\\t\" << total << \"/\" << total << green << \"\\t\\tOK\" << def << endl;\n}\n"
  },
  {
    "path": "tests/test_expression.cpp",
    "content": "#include \"expression.hpp\"\n#include \"simplification.hpp\"\n#include \"exception.hpp\"\n#include <cassert>\n#include <iostream>\n#include <string>\n#include <sstream>\n#include <iomanip>\n\nusing std::cout;\nusing std::endl; \nusing std::string;\n\nnamespace test{\n    namespace expression{\n        // Individual unit tests\n        unsigned int basic(){\n            Expr e1, e2, e3, e4, e5, e6, e7, e8; \n            for( int i = 0; i < 10; i++){\n                e1 = exprcst(32, -1);\n                e2 = exprcst(32, 1048567);\n                e3 = exprmem(32, e2);\n                e4 = -e1;\n                e5 = e2 - e1;\n                e6 = extract(e1, 31, 23);\n                e7 = e6;\n            }\n            return 0;\n        }\n\n        /* Expression hashing */\n        unsigned int _assert_hash_eq(Expr e1, Expr e2){\n            if( e1->hash() != e2->hash() ){\n                cout << endl << \"Fail: _assert_hash_eq: \" << e1 << \" == \" << e2 << endl;\n                throw test_exception();  \n            }\n            return 1; \n        }\n        \n        unsigned int _assert_hash_neq(Expr e1, Expr e2){\n            if( e1->hash() == e2->hash() ){\n                cout << endl << \"Fail: _assert_hash_eq: \" << e1 << \" == \" << e2 << endl;\n                throw test_exception();  \n            }\n            return 1; \n        }\n        unsigned hashing(){\n            Expr    e1 = exprcst(32,1),\n                    e2 = exprvar(32, \"var1\"),\n                    e3 = exprmem(32, e2),\n                    e4 = -e1,\n                    e5 = e2 & e3,\n                    e6 = exprmem(32, e5),\n                    e7 = exprmem(32, e6),\n                    e8 = bisz(32, e5, 1),\n                    e9 = e3 % e5;;\n            unsigned int nb = 0;\n            // Hash equality \n            nb += _assert_hash_eq(e1, exprcst(32,1));\n            nb += _assert_hash_eq(e2, exprvar(32, \"var1\"));\n            nb += 
_assert_hash_eq(e3, exprmem(32, e2));\n            nb += _assert_hash_eq(e4, (-e1));\n            nb += _assert_hash_eq(e5, (e2 & e3));\n            nb += _assert_hash_eq(e6, exprmem(32, e5));\n            nb += _assert_hash_eq(e7, exprmem(32,e6));\n            nb += _assert_hash_eq(e8, bisz(32, e5, 1));\n            nb += _assert_hash_eq(e9, e3%e5);\n            // Hash inequality\n            nb += _assert_hash_neq(e1, e2);\n            nb += _assert_hash_neq(e2,e3);\n            nb += _assert_hash_neq(e3,e4);\n            nb += _assert_hash_neq(e4,e5);\n            nb += _assert_hash_neq(e5,e6);\n            nb += _assert_hash_neq(e6,e7);\n            nb += _assert_hash_neq(e8, bisz(32, e5, 0));\n            nb += _assert_hash_neq(e9, e5%e3);\n            return nb;\n        }\n        \n        /* Expression Canonization */\n        unsigned int _assert_canonize_eq(Expr e1, Expr e2 ){\n            Expr tmp1 = expr_canonize(e1), tmp2 = expr_canonize(e2);\n            if(!(tmp1->eq(tmp2))){\n                cout << endl << \"Fail:  _assert_canonize_eq: \" << e1 << \" <==> \" << e2 << endl\n                << \"Note: canonized as : \" << tmp1 << \" <==> \" << tmp2 << endl << std::flush;\n                throw test_exception(); \n            }\n            return 1;\n        }\n        unsigned int _assert_canonize_neq(Expr e1, Expr e2 ){\n            Expr tmp1 = expr_canonize(e1), tmp2 = expr_canonize(e2);\n            if(!tmp1->neq(tmp2)){\n                cout << endl << \"Fail:  _assert_canonize_neq: \" << e1 << \" <=/=> \" << e2 << endl\n                << \"Note: canonized as : \" << tmp1 << \" <==> \" << tmp2 << endl << std::flush;\n                throw test_exception(); \n            }\n            return 1;\n        }\n        unsigned int canonize(){\n            Expr    cst1 = exprcst(32, 1),\n                    cst2 = exprcst(32, 567),\n                    var1 = exprvar(32, \"var1\"),\n                    var2 = exprvar(32, \"var2\"),\n               
     var3 = exprvar(32, \"var3\"),\n                    un1 = -var2,\n                    bin1 = var1+var2,\n                    bin2 = var3/var2,\n                    bin3 = sdiv(var3,var2); \n            unsigned int nb = 0;\n            // a+b == b+a \n            nb += _assert_canonize_eq((cst1+cst2), (cst2+cst1));\n            nb += _assert_canonize_eq((cst1+var1), (var1+cst1));\n            nb += _assert_canonize_eq((bin3+var1), (var1+bin3));\n            nb += _assert_canonize_eq((bin1+bin2), (bin2+bin1));\n            nb += _assert_canonize_eq((bin1+bin1), (bin1+bin1)); \n            // a*b == b*a\n            nb += _assert_canonize_eq((cst1*cst2), (cst2*cst1));\n            nb += _assert_canonize_eq((cst1*var1), (var1*cst1));\n            // (a^b)^c == (c^b)^a\n            nb += _assert_canonize_eq( cst1^var1^bin3, cst1^bin3^var1);\n            // a/b/c == a/c/b\n            nb += _assert_canonize_eq( var2/var3/un1, var2/un1/var3);\n            // a/b != b/a \n            nb += _assert_canonize_neq(var3/cst1, cst1/var3);\n            // a<<b != b<<a\n            nb += _assert_canonize_neq(shl(bin1,bin2), shl(bin2, bin1));\n            // a-b-c == a-c-b\n            nb += _assert_canonize_eq(cst1-var1-bin3, cst1-bin3-var1);\n            // a-b != b-a\n            nb += _assert_canonize_neq(cst2-un1, un1-cst2);\n            // Concat reordering\n            nb += _assert_canonize_eq(concat(var1, concat(var2, var3)), concat(concat(var1, var2), var3));\n            return nb;  \n        };\n        \n\n        unsigned int _assert(bool val, const string& msg){\n            if( !val){\n                cout << \"\\nFail: \" << msg << endl << std::flush; \n                throw test_exception();\n            }\n            return 1; \n        }\n\n        /* Concretization */\n        unsigned int concretization(){\n            unsigned int nb = 0; \n            VarContext ctx = VarContext(0);\n            VarContext ctx2 = VarContext(1);\n            Expr    v1 
= exprvar(32, \"var1\" ),\n                    v2 = exprvar(32, \"var2\"),\n                    v3 = exprvar(64, \"var3\"),\n                    v4 = exprvar(64, \"var4\"),\n                    e1 = v1|v2, \n                    e2 = v3+v4,\n                    e3 = extract(v1, 8, 1),\n                    e4 = concat(v2,v1); \n            ctx.set(\"var1\", 10);\n            ctx.set(\"var2\", -2);\n            ctx.set(\"var3\", 0xffff000000000000);\n            ctx.set(\"var4\", 0x0000ffffffffffff);\n            \n            ctx2.set(\"var1\", 7);\n            ctx2.set(\"var2\", -1);\n            ctx2.set(\"var3\", 0xeeee000000000000);\n            ctx2.set(\"var4\", 0x0000eeeeeeeeeeee);\n            \n            nb += _assert( v1->concretize(&ctx) == 10, \"Concretization gave wrong result\");\n            nb += _assert( v2->concretize(&ctx) == -2, \"Concretization gave wrong result\"); \n            nb += _assert( v3->concretize(&ctx) == 0xffff000000000000, \"Concretization gave wrong result\"); \n            nb += _assert( v4->concretize(&ctx) == 0x0000ffffffffffff, \"Concretization gave wrong result\"); \n            \n            nb += _assert( (v1+v2)->concretize(&ctx) == exprcst(32, 8)->concretize(&ctx), \"Concretization gave wrong result\"); \n            nb += _assert( (v1*v2)->concretize(&ctx) == exprcst(32, -20)->concretize(&ctx), \"Concretization gave wrong result\"); \n            nb += _assert( (v1/v2)->concretize(&ctx) == exprcst(32, 0)->concretize(&ctx), \"Concretization gave wrong result\"); \n            nb += _assert( sdiv(v1,v2)->concretize(&ctx) == exprcst(32, -5)->concretize(&ctx), \"Concretization gave wrong result\"); \n            nb += _assert( (v1^v2)->concretize(&ctx) == exprcst(32, 10^-2)->concretize(&ctx), \"Concretization gave wrong result\");\n            nb += _assert( (v1|v2)->concretize(&ctx) == exprcst(32, 10|-2)->concretize(&ctx), \"Concretization gave wrong result\");\n            nb += _assert( 
extract(v2,31,24)->concretize(&ctx) == exprcst(8, 0xff)->concretize(&ctx), \"Concretization gave wrong result\");\n            nb += _assert( shr(v1,exprcst(32, 2))->concretize(&ctx) == 2, \"Concretization gave wrong result\");\n            nb += _assert( shl(exprcst(32, 0x800000001),exprcst(32, 2))->concretize(&ctx) == 4, \"Concretization gave wrong result\");\n            nb += _assert( concat(v1,v2)->concretize(&ctx) == 0x0000000afffffffe, \"Concretization gave wrong result\");\n            nb += _assert( bisz(16, v1, 1)->concretize(&ctx) == 0, \"Concretization gave wrong result\");\n            nb += _assert( bisz(16, v1, 0)->concretize(&ctx) == 1, \"Concretization gave wrong result\");\n            nb += _assert( bisz(4, exprcst(26, 0), 1)->concretize(&ctx) == 1, \"Concretization gave wrong result\");\n            nb += _assert( bisz(4, exprcst(26, 0), 0)->concretize(&ctx) == 0, \"Concretization gave wrong result\");\n            \n            nb += _assert( smod(exprcst(32, -6), exprcst(32, 5))->concretize(&ctx) == -1, \"Concretization gave wrong result\");\n            nb += _assert( smod(exprcst(32, -10), exprcst(32,3))->concretize(&ctx) == -1, \"Concretization gave wrong result\");\n            nb += _assert( smod(exprcst(32, 10), exprcst(32,-3))->concretize(&ctx) == 1, \"Concretization gave wrong result\");\n            \n            // multiplications\n            nb += _assert( mulh(exprcst(64, 0xbbf543), exprcst(64, 0xfffffabc7865))->concretize(&ctx) == 0xbb, \"Concretization gave wrong result\");\n            nb += _assert( mulh(exprcst(32, 0xbbf543), exprcst(32, 0xc7865))->concretize(&ctx) == 0x927, \"Concretization gave wrong result\");\n            nb += _assert( smull(exprcst(8, 48), exprcst(8, 4))->concretize(&ctx) == 0xffffffffffffffc0, \"Concretization gave wrong result\");\n            nb += _assert( smulh(exprcst(8, 48), exprcst(8, 4))->concretize(&ctx) == 0, \"Concretization gave wrong result\");\n            nb += _assert( smull(exprcst(8, 
-4), exprcst(8, 4))->concretize(&ctx) == 0xfffffffffffffff0, \"Concretization gave wrong result\");\n            nb += _assert( smulh(exprcst(8, -4), exprcst(8, 4))->concretize(&ctx) == 0xffffffffffffffff, \"Concretization gave wrong result\");\n            nb += _assert( smull(exprcst(16, 48), exprcst(16, 4))->concretize(&ctx) == 0xc0, \"Concretization gave wrong result\");\n            nb += _assert( smulh(exprcst(16, 48), exprcst(16, 4))->concretize(&ctx) == 0, \"Concretization gave wrong result\");\n            nb += _assert( smull(exprcst(32, 4823424), exprcst(32, -423))->concretize(&ctx) == 0xffffffff86635D80, \"Concretization gave wrong result\");\n            nb += _assert( smulh(exprcst(32, 4823424), exprcst(32, -423))->concretize(&ctx) == 0xffffffffffffffff, \"Concretization gave wrong result\");\n            nb += _assert( smull(exprcst(32, -1), exprcst(32, -1))->concretize(&ctx) == 1, \"Concretization gave wrong result\");\n            nb += _assert( smulh(exprcst(32, -1), exprcst(32, -1))->concretize(&ctx) == 0, \"Concretization gave wrong result\");\n            \n            \n            nb += _assert( (-v3)->concretize(&ctx) == 0x0001000000000000, \"Concretization gave wrong result\");\n            nb += _assert( (~v4)->concretize(&ctx) == 0xffff000000000000, \"Concretization gave wrong result\");  \n            nb += _assert( (v3^v4)->concretize(&ctx) == -1, \"Concretization gave wrong result\");\n            nb += _assert( (v4&v3)->concretize(&ctx) == 0, \"Concretization gave wrong result\");\n            nb += _assert( (v3|v4)->concretize(&ctx) == -1, \"Concretization gave wrong result\");\n            nb += _assert( (v3*v4)->concretize(&ctx) == 0xffff000000000000*0x0000ffffffffffff, \"Concretization gave wrong result\");\n            nb += _assert(( exprcst(32, 23)%exprcst(32, 2))->concretize(&ctx) == 1, \"Concretization gave wrong result\");\n            nb += _assert(( exprcst(32, 20)%exprcst(32, 27))->concretize(&ctx) == 20, \"Concretization 
gave wrong result\");\n            nb += _assert(( exprcst(32, 0xffffffff)%exprcst(32, 4))->concretize(&ctx) == \n                          ( exprcst(32, -1)%exprcst(32, 4))->concretize(&ctx), \"Concretization gave wrong result\");\n\n            nb += _assert( v1->concretize(&ctx) != v1->concretize(&ctx2), \"Concretization with different contexts gave same result\");\n            nb += _assert( v2->concretize(&ctx) != v2->concretize(&ctx2), \"Concretization with different contexts gave same result\"); \n            nb += _assert( v3->concretize(&ctx) != v3->concretize(&ctx2), \"Concretization with different contexts gave same result\"); \n            nb += _assert( v4->concretize(&ctx) != v4->concretize(&ctx2), \"Concretization with different contexts gave same result\"); \n            nb += _assert( (v1|v2)->concretize(&ctx) != (v1|v2)->concretize(&ctx2), \"Concretization with different contexts gave same result\");\n            nb += _assert( e1->concretize(&ctx) != e1->concretize(&ctx2), \"Concretization with different contexts gave same result\"); \n            nb += _assert( e2->concretize(&ctx) != e2->concretize(&ctx2), \"Concretization with different contexts gave same result\"); \n            nb += _assert( e3->concretize(&ctx) != e3->concretize(&ctx2), \"Concretization with different contexts gave same result\"); \n            nb += _assert( e4->concretize(&ctx) != e4->concretize(&ctx2), \"Concretization with different contexts gave same result\"); \n\n            return nb;\n        }\n        \n        unsigned int change_varctx(){\n            unsigned int nb = 0; \n            VarContext ctx = VarContext(0);\n            Expr    v1 = exprvar(32, \"var1\" ),\n                    v2 = exprvar(32, \"var2\"),\n                    v3 = exprvar(64, \"var3\"),\n                    v4 = exprvar(64, \"var4\"),\n                    e1 = v1+v2, \n                    e2 = v3|v4;\n            ctx.set(\"var1\", 100);\n            ctx.set(\"var2\", -2);\n            
\n            nb += _assert( v1->concretize(&ctx) == 100, \"Concretization gave wrong result\");\n            nb += _assert( v2->concretize(&ctx) == -2, \"Concretization gave wrong result\"); \n            \n            ctx.set(\"var1\", 10);\n            ctx.set(\"var3\", 0xffff000000000000);\n            ctx.set(\"var4\", 0x0000ffffffffffff);\n            \n            nb += _assert( v1->concretize(&ctx) == 10, \"Concretization gave wrong result\");\n            nb += _assert( v2->concretize(&ctx) == -2, \"Concretization gave wrong result\"); \n            nb += _assert( v3->concretize(&ctx) == 0xffff000000000000, \"Concretization gave wrong result\"); \n            nb += _assert( v4->concretize(&ctx) == 0x0000ffffffffffff, \"Concretization gave wrong result\"); \n            \n            \n            nb += _assert( e1->concretize(&ctx) == exprcst(32, 8)->concretize(&ctx), \"Concretization gave wrong result\"); \n            nb += _assert( e2->concretize(&ctx) == exprcst(64, -1)->concretize(&ctx), \"Concretization gave wrong result\"); \n            \n            ctx.set(\"var2\", -3);\n            ctx.set(\"var4\", 0xffff000000000000);\n            \n            nb += _assert( e1->concretize(&ctx) == exprcst(32, 7)->concretize(&ctx), \"Concretization gave wrong result\"); \n            nb += _assert( e2->concretize(&ctx) == exprcst(64, 0xffff000000000000)->concretize(&ctx), \"Concretization gave wrong result\");\n            \n            return nb;\n        }\n\n    }\n}\n\nusing namespace test::expression; \n// All unit tests \nvoid test_expression(){\n    unsigned int total = 0;\n    string green = \"\\033[1;32m\";\n    string def = \"\\033[0m\";\n    string bold = \"\\033[1m\";\n    \n    // Start testing \n    cout << bold << \"[\" << green << \"+\" << def << bold << \"]\" << def << std::left << std::setw(34) << \" Testing expression module... 
\" << std::flush;  \n    total += basic();\n    total += canonize();\n    total += hashing();\n    total += concretization();\n    total += change_varctx();\n    // Return res\n    cout << \"\\t\" << total << \"/\" << total << green << \"\\t\\tOK\" << def << endl;\n}\n"
  },
  {
    "path": "tests/test_gadgets.cpp",
    "content": "#include \"database.hpp\"\n#include \"exception.hpp\"\n#include <string>\n#include <sstream>\n#include <tuple>\n#include <iostream>\n#include <iomanip>\n\nusing std::cout;\nusing std::endl; \nusing std::string;\n\nnamespace test{\n    namespace gadgets{\n        unsigned int _assert(bool val, const string& msg){\n            if( !val){\n                cout << \"\\nFail: \" << msg << endl << std::flush; \n                throw test_exception();\n            }\n            return 1; \n        }\n\n        unsigned int basic(){\n            unsigned int nb = 0;\n            Arch* arch = new ArchX86();\n\n            ropgadget_to_file(\"/tmp/gadgets.ropium\", \"/tmp/ropgagdet_tmp.ropium\", \"/usr/bin/nmap\");\n\n            vector<RawGadget>* raw = raw_gadgets_from_file(\"/tmp/gadgets.ropg\");\n\n            delete raw;\n            delete arch;\n            return nb;\n        }\n    }\n}\n\nusing namespace test::gadgets; \n// All unit tests \nvoid test_gadgets(){\n    unsigned int total = 0;\n    string green = \"\\033[1;32m\";\n    string def = \"\\033[0m\";\n    string bold = \"\\033[1m\";\n    \n    // Start testing \n    cout << bold << \"[\" << green << \"+\" << def << bold << \"]\" << def << std::left << std::setw(34) << \" Testing gadget analysis... \" << std::flush;  \n    total += basic();\n    // Return res\n    cout << \"\\t\" << total << \"/\" << total << green << \"\\t\\tOK\" << def << endl;\n}\n"
  },
  {
    "path": "tests/test_il.cpp",
    "content": "#include \"il.hpp\"\n#include \"arch.hpp\"\n#include \"exception.hpp\"\n#include <cassert>\n#include <iostream>\n#include <string>\n#include <sstream>\n#include <iomanip>\n#include \"strategy.hpp\"\n\nusing std::cout;\nusing std::endl; \nusing std::string;\n\nnamespace test{\n    namespace il{\n        unsigned int _assert(bool val, const string& msg){\n            if( !val){\n                cout << \"\\nFail: \" << msg << endl << std::flush; \n                throw test_exception();\n            }\n            return 1; \n        }\n\n        unsigned int il_parser(){\n            unsigned int nb = 0;\n            ArchX86 arch;\n            \n            // mov reg\n            string str = \"   eax =     ebx\";\n            ILInstruction instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::MOV_REG, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_MOVREG_DST_REG] == X86_EAX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_MOVREG_SRC_REG] == X86_EBX, \"Failed to parse IL Instruction\");\n            \n            str = \"ecx=       esi       \";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::MOV_REG, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_MOVREG_DST_REG] == X86_ECX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_MOVREG_SRC_REG] == X86_ESI, \"Failed to parse IL Instruction\");\n            \n            // mov cst\n            str = \"eip=   1234       \";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::MOV_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_MOVCST_DST_REG] == X86_EIP, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_MOVCST_SRC_CST] == 1234, \"Failed to parse IL 
Instruction\");\n            \n            str = \" edx  =12345\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::MOV_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_MOVCST_DST_REG] == X86_EDX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_MOVCST_SRC_CST] == 12345, \"Failed to parse IL Instruction\");\n            \n            str = \"eip = 0x1234       \";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::MOV_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_MOVCST_DST_REG] == X86_EIP, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_MOVCST_SRC_CST] == 0x1234, \"Failed to parse IL Instruction\");\n            \n            str = \"eip =   -0x1234       \";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::MOV_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_MOVCST_DST_REG] == X86_EIP, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_MOVCST_SRC_CST] == -0x1234, \"Failed to parse IL Instruction\");\n            \n            str = \"eip =   - 0x1234       \";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::MOV_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_MOVCST_DST_REG] == X86_EIP, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_MOVCST_SRC_CST] == -0x1234, \"Failed to parse IL Instruction\");\n\n            // amov cst\n            str = \" esp = eax +      42     \\n\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::AMOV_CST, \"Failed to parse IL Instruction\");\n            nb += 
_assert(instr.args[PARAM_AMOVCST_DST_REG] == X86_ESP, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_AMOVCST_SRC_REG] == X86_EAX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_AMOVCST_SRC_OP] == (int)Op::ADD, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_AMOVCST_SRC_CST] == 42, \"Failed to parse IL Instruction\");\n            \n            str = \" esp = esi >>0x3     \\n\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::AMOV_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_AMOVCST_DST_REG] == X86_ESP, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_AMOVCST_SRC_REG] == X86_ESI, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_AMOVCST_SRC_OP] == (int)Op::SHR, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_AMOVCST_SRC_CST] == 3, \"Failed to parse IL Instruction\");\n            \n            // amov reg\n            str = \" esp = eax *      ebp    \\n\\n\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::AMOV_REG, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_AMOVREG_DST_REG] == X86_ESP, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_AMOVREG_SRC_REG1] == X86_EAX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_AMOVREG_SRC_OP] == (int)Op::MUL, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_AMOVREG_SRC_REG2] == X86_EBP, \"Failed to parse IL Instruction\");\n            \n            str = \" esp = esi <<      esi\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::AMOV_REG, \"Failed to parse IL Instruction\");\n            
nb += _assert(instr.args[PARAM_AMOVREG_DST_REG] == X86_ESP, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_AMOVREG_SRC_REG1] == X86_ESI, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_AMOVREG_SRC_OP] == (int)Op::SHL, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_AMOVREG_SRC_REG2] == X86_ESI, \"Failed to parse IL Instruction\");\n\n            // load\n            str = \" eax = [ esp + 32 ]   \\t\\n\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::LOAD, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_LOAD_DST_REG] == X86_EAX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_LOAD_SRC_ADDR_REG] == X86_ESP, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_LOAD_SRC_ADDR_OFFSET] == 32, \"Failed to parse IL Instruction\");\n            \n            str = \" eax =[esi-0xabcd   ]\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::LOAD, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_LOAD_DST_REG] == X86_EAX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_LOAD_SRC_ADDR_REG] == X86_ESI, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_LOAD_SRC_ADDR_OFFSET] == -0xabcd, \"Failed to parse IL Instruction\");\n            \n            // aload\n            str = \" eax *= [ esp + 32 ]   \\t\\n\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::ALOAD, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_ALOAD_DST_REG] == X86_EAX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_ALOAD_OP] == (int)Op::MUL, \"Failed to parse IL Instruction\");\n          
  nb += _assert(instr.args[PARAM_ALOAD_SRC_ADDR_REG] == X86_ESP, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_ALOAD_SRC_ADDR_OFFSET] == 32, \"Failed to parse IL Instruction\");\n            \n            str = \" eax <<=[esi]\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::ALOAD, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_ALOAD_DST_REG] == X86_EAX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_ALOAD_OP] == (int)Op::SHL, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_ALOAD_SRC_ADDR_REG] == X86_ESI, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_ALOAD_SRC_ADDR_OFFSET] == 0, \"Failed to parse IL Instruction\");\n\n            // load_cst\n            str = \" eax =[-1]\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::LOAD_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_LOADCST_DST_REG] == X86_EAX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_LOADCST_SRC_ADDR_OFFSET] == -1, \"Failed to parse IL Instruction\");\n\n            str = \"eax=   [0xffffffff]\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::LOAD_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_LOADCST_DST_REG] == X86_EAX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_LOADCST_SRC_ADDR_OFFSET] == 0xffffffff, \"Failed to parse IL Instruction\");\n\n            // aload_cst\n            str = \" edi ^= [0]\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::ALOAD_CST, \"Failed to parse IL Instruction\");\n            nb += 
_assert(instr.args[PARAM_ALOADCST_DST_REG] == X86_EDI, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_ALOADCST_OP] == (int)Op::XOR, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_ALOADCST_SRC_ADDR_OFFSET] == 0, \"Failed to parse IL Instruction\");\n            \n            // store\n            str = \" [eax - 2] = edx\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::STORE, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_STORE_DST_ADDR_REG] == X86_EAX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_STORE_DST_ADDR_OFFSET] == -2, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_STORE_SRC_REG] == X86_EDX, \"Failed to parse IL Instruction\");\n            \n            str = \" [ eax] = edx\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::STORE, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_STORE_DST_ADDR_REG] == X86_EAX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_STORE_DST_ADDR_OFFSET] == 0, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_STORE_SRC_REG] == X86_EDX, \"Failed to parse IL Instruction\");\n\n            // cst_store\n            str = \" [6789] = edx\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::CST_STORE, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTSTORE_DST_ADDR_OFFSET] == 6789, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTSTORE_SRC_REG] == X86_EDX, \"Failed to parse IL Instruction\");\n\n            str = \" [-2]=eip\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == 
ILInstructionType::CST_STORE, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTSTORE_DST_ADDR_OFFSET] == -2, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTSTORE_SRC_REG] == X86_EIP, \"Failed to parse IL Instruction\");\n            \n            // astore\n            str = \" [esp] |= edx\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::ASTORE, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_ASTORE_DST_ADDR_REG] == X86_ESP, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_ASTORE_DST_ADDR_OFFSET] == 0, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_ASTORE_OP] == (int)Op::OR, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_ASTORE_SRC_REG] == X86_EDX, \"Failed to parse IL Instruction\");\n\n            // cst_astore\n            str = \" [0x1800] %= ebx\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::CST_ASTORE, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTASTORE_DST_ADDR_OFFSET] == 0x1800, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTASTORE_SRC_REG] == X86_EBX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTASTORE_OP] == (int)Op::MOD, \"Failed to parse IL Instruction\");\n\n            // store_cst\n            str = \" [eax - 2] = 42\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::STORE_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_STORECST_DST_ADDR_REG] == X86_EAX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_STORECST_DST_ADDR_OFFSET] == -2, \"Failed to parse IL Instruction\");\n            nb 
+= _assert(instr.args[PARAM_STORECST_SRC_CST] == 42, \"Failed to parse IL Instruction\");\n\n            str = \" [ eax] = 1234\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::STORE_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_STORECST_DST_ADDR_REG] == X86_EAX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_STORECST_DST_ADDR_OFFSET] == 0, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_STORECST_SRC_CST] == 1234, \"Failed to parse IL Instruction\");\n\n            // cst_store_cst\n            str = \" [6789] = 0x42\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::CST_STORE_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTSTORECST_DST_ADDR_OFFSET] == 6789, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTSTORECST_SRC_CST] == 0x42, \"Failed to parse IL Instruction\");\n\n            str = \" [-20]=12\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::CST_STORE_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTSTORECST_DST_ADDR_OFFSET] == -20, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTSTORECST_SRC_CST] == 12, \"Failed to parse IL Instruction\");\n\n            // astore_cst\n            str = \" [esp] |= 34\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::ASTORE_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_ASTORECST_DST_ADDR_REG] == X86_ESP, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_ASTORECST_DST_ADDR_OFFSET] == 0, \"Failed to parse IL Instruction\");\n            nb += 
_assert(instr.args[PARAM_ASTORECST_OP] == (int)Op::OR, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_ASTORECST_SRC_CST] == 34, \"Failed to parse IL Instruction\");\n\n            // cst_astore_cst\n            str = \" [0x1800] %= 00\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::CST_ASTORE_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTASTORECST_DST_ADDR_OFFSET] == 0x1800, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTASTORECST_SRC_CST] == 0, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTASTORECST_OP] == (int)Op::MOD, \"Failed to parse IL Instruction\");\n\n            // function \n            str = \" 0x1000()\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::FUNCTION, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_FUNCTION_ADDR] == 0x1000, \"Failed to parse IL Instruction\");\n            \n            str = \" 1000( 22  )\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::FUNCTION, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_FUNCTION_ADDR] == 1000, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_FUNCTION_ARGS+0] == 22, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args_type[PARAM_FUNCTION_ARGS+0] == IL_FUNC_ARG_CST, \"Failed to parse IL Instruction\");\n            \n            str = \" 1000( eax  )\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::FUNCTION, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_FUNCTION_ADDR] == 1000, \"Failed to parse IL Instruction\");\n            nb += 
_assert(instr.args[PARAM_FUNCTION_ARGS+0] == X86_EAX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args_type[PARAM_FUNCTION_ARGS+0] == IL_FUNC_ARG_REG, \"Failed to parse IL Instruction\");\n            \n            str = \" 1000( 22, ebx  )\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::FUNCTION, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_FUNCTION_ADDR] == 1000, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_FUNCTION_ARGS+0] == 22, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args_type[PARAM_FUNCTION_ARGS+0] == IL_FUNC_ARG_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_FUNCTION_ARGS+1] == X86_EBX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args_type[PARAM_FUNCTION_ARGS+1] == IL_FUNC_ARG_REG, \"Failed to parse IL Instruction\");\n            \n            str = \" 1000( eax , 34, ebx )\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::FUNCTION, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_FUNCTION_ADDR] == 1000, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_FUNCTION_ARGS+0] == X86_EAX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args_type[PARAM_FUNCTION_ARGS+0] == IL_FUNC_ARG_REG, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_FUNCTION_ARGS+1] == 34, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args_type[PARAM_FUNCTION_ARGS+1] == IL_FUNC_ARG_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_FUNCTION_ARGS+2] == X86_EBX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args_type[PARAM_FUNCTION_ARGS+2] == IL_FUNC_ARG_REG, \"Failed to parse IL 
Instruction\");\n            \n            // syscall\n            str = \" sys_read()\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::SYSCALL, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.syscall_name == \"read\", \"Failed to parse IL Instruction\");\n\n            str = \" sys_truc(42)\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::SYSCALL, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.syscall_name == \"truc\", \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_SYSCALL_ARGS+0] == 42, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args_type[PARAM_SYSCALL_ARGS+0] == IL_FUNC_ARG_CST, \"Failed to parse IL Instruction\");\n\n            str = \" sys_truc(42, esp, 1)\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::SYSCALL, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.syscall_name == \"truc\", \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_SYSCALL_ARGS+0] == 42, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args_type[PARAM_SYSCALL_ARGS+0] == IL_FUNC_ARG_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_SYSCALL_ARGS+1] == X86_ESP, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args_type[PARAM_SYSCALL_ARGS+1] == IL_FUNC_ARG_REG, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_SYSCALL_ARGS+2] == 1, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args_type[PARAM_SYSCALL_ARGS+2] == IL_FUNC_ARG_CST, \"Failed to parse IL Instruction\");\n\n            str = \" sys_11()\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::SYSCALL, \"Failed to 
parse IL Instruction\");\n            nb += _assert(instr.syscall_num == 11, \"Failed to parse IL Instruction\");\n\n            str = \" sys_0x42(eax)\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::SYSCALL, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.syscall_num == 0x42, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_SYSCALL_ARGS+0] == X86_EAX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args_type[PARAM_SYSCALL_ARGS+0] == IL_FUNC_ARG_REG, \"Failed to parse IL Instruction\");\n\n            // cst_store_string\n            str = \" [6789] = 'lala'\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::CST_STORE_STRING, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTSTORE_STRING_ADDR_OFFSET] == 6789, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.str == \"lala\", \"Failed to parse IL Instruction\");\n\n            str = \" [6789] = 'lal\\\\\\\\a'\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::CST_STORE_STRING, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTSTORE_STRING_ADDR_OFFSET] == 6789, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.str == \"lal\\\\a\", \"Failed to parse IL Instruction\");\n            \n            str = \" [6789] = 'lal\\\\'a'\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::CST_STORE_STRING, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTSTORE_STRING_ADDR_OFFSET] == 6789, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.str == \"lal'a\", \"Failed to parse IL Instruction\");\n                \n            str = \" [6789] = 
'lal\\\\x41\\\\x42a'\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::CST_STORE_STRING, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTSTORE_STRING_ADDR_OFFSET] == 6789, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.str == \"lalABa\", \"Failed to parse IL Instruction\");\n\n            str = \" [0x1234] =  'lalatotokikoo\\\\x00'\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::CST_STORE_STRING, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_CSTSTORE_STRING_ADDR_OFFSET] == 0x1234, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.str == string(\"lalatotokikoo\\x00\", 14), \"Failed to parse IL Instruction\");\n            \n            str = \" syscall \";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::SINGLE_SYSCALL, \"Failed to parse IL Instruction\");\n\n            return nb;\n        }\n\n        unsigned int il_parser64(){\n            unsigned int nb = 0;\n            ArchX64 arch;\n            \n            // parse_il_cst should work with both unsigned and signed constants\n            string str = \" 1000( 0xffffffffffffffff, -1 )\";\n            ILInstruction instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::FUNCTION, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_FUNCTION_ADDR] == 1000, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_FUNCTION_ARGS+0] == (cst_t)0xffffffffffffffff, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args_type[PARAM_FUNCTION_ARGS+0] == IL_FUNC_ARG_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_FUNCTION_ARGS+1] == -1, \"Failed to parse IL Instruction\");\n            nb 
+= _assert(instr.args_type[PARAM_FUNCTION_ARGS+1] == IL_FUNC_ARG_CST, \"Failed to parse IL Instruction\");\n            \n            str = \"rax=   [0x7000000000000000]\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::LOAD_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_LOADCST_DST_REG] == X64_RAX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_LOADCST_SRC_ADDR_OFFSET] == (cst_t)0x7000000000000000, \"Failed to parse IL Instruction\");\n            \n            // 0xffffffffffffffff == -1\n            str = \"rdx=   0xffffffffffffffff\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::MOV_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_MOVCST_DST_REG] == X64_RDX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_MOVCST_SRC_CST] == -1, \"Failed to parse IL Instruction\");\n            \n            // Edge case: -0xffffffffffffffff == -(0xffffffffffffffff) == -(-1) == 1\n            str = \"rcx=   -0xffffffffffffffff\";\n            instr = ILInstruction(arch, str);\n            nb += _assert(instr.type == ILInstructionType::MOV_CST, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_MOVCST_DST_REG] == X64_RCX, \"Failed to parse IL Instruction\");\n            nb += _assert(instr.args[PARAM_MOVCST_SRC_CST] == 1, \"Failed to parse IL Instruction\");\n            \n            return nb;\n        }\n    }\n}\n\nusing namespace test::il; \n// All unit tests \nvoid test_il(){\n    unsigned int total = 0;\n    string green = \"\\033[1;32m\";\n    string def = \"\\033[0m\";\n    string bold = \"\\033[1m\";\n    \n    // Start testing \n    cout << bold << \"[\" << green << \"+\" << def << bold << \"]\" << def << std::left << std::setw(34) << \" Testing il module... 
\" << std::flush;  \n    total += il_parser();\n    total += il_parser64();\n    // Return res\n    cout << \"\\t\" << total << \"/\" << total << green << \"\\t\\tOK\" << def << endl;\n}\n"
  },
  {
    "path": "tests/test_ir.cpp",
    "content": "#include \"ir.hpp\"\n#include \"exception.hpp\"\n#include <cassert>\n#include <iostream>\n#include <string>\n#include <sstream>\n#include <iomanip>\n\nusing std::cout;\nusing std::endl; \nusing std::string;\n\nnamespace test{\n    namespace ir{\n        unsigned int _assert(bool val, const string& msg){\n            if( !val){\n                cout << \"\\nFail: \" << msg << endl << std::flush; \n                throw test_exception();\n            }\n            return 1; \n        }\n        \n        unsigned int ir_context(){\n            IRContext ctx = IRContext(4);\n            Expr    e1 = exprcst(32, 56),\n                    e2 = exprvar(64, \"var1\"),\n                    e3 = exprvar(16, \"var2\");\n            unsigned int nb = 0;\n            ctx.set(0, e1);\n            ctx.set(1, e1);\n            ctx.set(2, e2);\n            ctx.set(3, e3);\n            nb += _assert(ctx.get(0)->eq(e1), \"IRContext failed to update then get variable\");\n            nb += _assert(ctx.get(1)->eq(e1), \"IRContext failed to update then get variable\");\n            nb += _assert(ctx.get(2)->eq(e2), \"IRContext failed to update then get variable\");\n            nb += _assert(ctx.get(3)->eq(e3), \"IRContext failed to update then get variable\");\n            return nb; \n        }\n    }\n    \n    \n}\n\nusing namespace test::ir; \n// All unit tests \nvoid test_ir(){\n    unsigned int total = 0;\n    string green = \"\\033[1;32m\";\n    string def = \"\\033[0m\";\n    string bold = \"\\033[1m\";\n    \n    // Start testing \n    cout << bold << \"[\" << green << \"+\" << def << bold << \"]\" << def << std::left << std::setw(34) << \" Testing ir module... \" << std::flush;  \n    total += ir_context();\n    // Return res\n    cout << \"\\t\" << total << \"/\" << total << green << \"\\t\\tOK\" << def << endl;\n}\n"
  },
  {
    "path": "tests/test_simplification.cpp",
    "content": "#include \"expression.hpp\"\n#include \"simplification.hpp\"\n#include \"exception.hpp\"\n#include <cassert>\n#include <iostream>\n#include <string>\n#include <sstream>\n#include <iomanip>\n\nusing std::cout;\nusing std::endl; \nusing std::string;\n\nnamespace test{\n    namespace simplification{        \n        unsigned int _assert_simplify(Expr e1, Expr e2, ExprSimplifier& simp){\n            Expr tmp1 = simp.simplify(e1);\n            Expr tmp2 = simp.simplify(e2);\n            if( tmp1->neq(tmp2) ){\n                cout << \"\\nFail: _assert_simplify: \" << e1 << \" => \" << e2 << endl\n                << \"Note: instead simplified into \" << tmp1 << \" => \" << tmp2 << endl << std::flush; \n                throw test_exception();\n            }\n            return 1; \n        }\n        \n        unsigned int _assert(bool val, const string& msg){\n            if( !val){\n                cout << \"\\nFail: \" << msg << endl << std::flush; \n                throw test_exception();\n            }\n            return 1; \n        }\n        \n        unsigned int basic(ExprSimplifier& s){\n            Expr e1 = exprvar(32,\"varA\");\n            return 0;\n        }\n        \n        unsigned int const_folding(ExprSimplifier& s){\n            unsigned int nb = 0;\n            Expr e1 = exprcst(32,-1), e2 =exprcst(32, 1048567);\n            nb += _assert_simplify(exprcst(16,2)+exprcst(16,4), exprcst(16,6), s);\n            nb += _assert_simplify(exprcst(4,3)*exprcst(4,7),  exprcst(4, 5), s);\n            nb += _assert_simplify(exprcst(8,0xc3)/exprcst(8,0x40),  exprcst(8, 3), s);\n            nb += _assert_simplify(exprcst(16, 321)/exprcst(16, 40), exprcst(16, 321U/40U), s);\n            nb += _assert_simplify(sdiv(exprcst(16, 567),exprcst(16, 56)), exprcst(16, 567/56), s);\n            nb += _assert_simplify(exprcst(16, 0x2)&exprcst(16, 0x1234), exprcst(16, 0x2&0x1234), s);\n            nb += _assert_simplify(exprcst(16, 0x2)|exprcst(16, 
0x1234), exprcst(16, 0x2|0x1234), s);\n            nb += _assert_simplify(exprcst(16, 0x2)^exprcst(16, 0x1234), exprcst(16, 0x2^0x1234), s);\n            nb += _assert_simplify(shl(exprcst(16, 1),exprcst(16, 4)), exprcst(16, 16), s);\n            nb += _assert_simplify(shr(exprcst(16, 16),exprcst(16, 4)), exprcst(16, 1), s);\n            nb += _assert_simplify(shl(exprcst(16, 1), exprcst(16, 16)), exprcst(16,0), s);\n            \n            nb += _assert_simplify(extract(exprcst(8, 20), 4, 2), exprcst(3, 5), s);\n            nb += _assert_simplify(concat(exprcst(8, 1), exprcst(4, -1)), exprcst(12, 0x1f), s);\n            \n            nb += _assert_simplify(-exprcst(7,3),  exprcst(7, -3), s);\n            nb += _assert_simplify(~exprcst(7,3),  exprcst(7, ~3), s);\n            \n            nb += _assert_simplify(e2+e1-e1, e2, s);\n            \n            nb += _assert_simplify(bisz(32, e1, 1), exprcst(32, 0), s);\n            nb += _assert_simplify(bisz(23, exprcst(32,0), 1), exprcst(23, 1), s);\n            \n            return nb; \n        }\n        unsigned int neutral_elems(ExprSimplifier& s){\n            unsigned int nb = 0;\n            nb += _assert_simplify(exprvar(32,\"var1\")+exprcst(32, 0), exprvar(32, \"var1\"), s);\n            nb += _assert_simplify(exprvar(32,\"var1\")*exprcst(32, 1), exprvar(32, \"var1\"), s);\n            nb += _assert_simplify(exprvar(32,\"var1\")/exprcst(32, 1), exprvar(32, \"var1\"), s);\n            nb += _assert_simplify(sdiv(exprvar(32,\"var1\"),exprcst(32, 1)), exprvar(32, \"var1\"), s);\n            nb += _assert_simplify(exprvar(7,\"var1\")&exprcst(7, 0b1111111), exprvar(7, \"var1\"), s);\n            nb += _assert_simplify(exprvar(6,\"var1\")|exprcst(6, 0), exprvar(6, \"var1\"), s);\n            nb += _assert_simplify(exprvar(32,\"var1\")^exprcst(32, 0), exprvar(32, \"var1\"), s);\n            nb += _assert_simplify(extract(exprvar(32,\"var1\"), 31, 0), exprvar(32, \"var1\"), s);\n            return nb; \n        
}\n        \n        unsigned int absorbing_elems(ExprSimplifier& s){\n            unsigned int nb = 0;\n            nb += _assert_simplify(exprvar(33,\"var1\")*exprcst(33,0), exprcst(33,0), s);\n            nb += _assert_simplify(exprvar(6, \"var1\")|exprcst(6,0b111111), exprcst(6,0b111111), s);\n            nb += _assert_simplify(exprvar(5,\"var1\")&exprcst(5,0), exprcst(5,0), s);\n            nb += _assert_simplify(shl(exprvar(32,\"var1\"),exprcst(32, 50)), exprcst(32,0), s);\n            nb += _assert_simplify(shr(exprvar(32,\"var1\"),exprcst(32, 32)), exprcst(32,0), s);\n            return nb; \n        }\n        \n        unsigned int arithmetic_properties(ExprSimplifier& s){\n            unsigned int nb = 0;\n            Expr    e1 = exprvar(64, \"var1\"),\n                    e2 = exprvar(64, \"var2\"),\n                    e3 = exprvar(64, \"var3\"),\n                    e4 = e1/e2,\n                    c1 = exprcst(64, 1);\n            nb += _assert_simplify( e1+(e1*e2), (e2+c1)*e1, s);\n            nb += _assert_simplify( (e2*e1)+e1, (e2+c1)*e1, s);\n            nb += _assert_simplify( (e1*e2)-e1, (e2-c1)*e1, s);\n            nb += _assert_simplify( (e2*e1)-e1, (e2-c1)*e1, s);\n            nb += _assert_simplify( (e1*e3)+(e2*e3), (e1+e2)*e3 , s);\n            nb += _assert_simplify( (e3*e1)+(e2*e3), (e1+e2)*e3 , s);\n            nb += _assert_simplify( (e1*e3)+(e3*e2), (e1+e2)*e3 , s);\n            nb += _assert_simplify( (e3*e1)+(e3*e2), (e1+e2)*e3 , s);\n            nb += _assert_simplify( (e4+(e4*e3)), (e3+c1)*e4, s);\n            nb += _assert_simplify( (e4+(e3*e4)), (e3+c1)*e4, s);\n            nb += _assert_simplify( (-e4+(e4*e3)), (e3-c1)*e4, s);\n            nb += _assert_simplify( (-e4+(e3*e4)), (e3-c1)*e4, s);\n            nb += _assert_simplify( (e4+e4) , e4*exprcst(64, 2), s); \n            nb += _assert_simplify( e4-e4, exprcst(64,0), s);\n            nb += _assert_simplify( -e3+e3, exprcst(64,0), s);\n            return nb; \n        }\n   
     \n        unsigned int involution(ExprSimplifier& s){\n            unsigned int nb = 0;\n            nb += _assert_simplify( -(-exprvar(64, \"var1\")), exprvar(64, \"var1\"), s);\n            nb += _assert_simplify( ~~exprvar(64, \"var1\"), exprvar(64, \"var1\"), s);\n            return nb; \n        }\n        \n        unsigned int extract_patterns(ExprSimplifier& s){\n            unsigned int nb = 0;\n            Expr e1 = exprvar(32,\"var1\"), e2 = exprvar(14, \"var2\"); \n            Expr e = concat(e1, e2);\n            nb += _assert_simplify(extract(e, 45, 40), extract(e1, 31, 26), s);\n            nb += _assert_simplify(extract(e, 8, 1), extract(e2, 8, 1), s);\n            nb += _assert_simplify(extract(extract(e1, 28,10),8,1), extract(e1, 18,11), s);\n            nb += _assert_simplify(extract(extract(exprcst(64,0xffffff), 31,0),10,10), \n                                   extract(exprcst(64,0xffffff), 10,10), s);\n            return nb; \n        }\n        \n        unsigned int basic_transform(ExprSimplifier& s){\n            unsigned int nb = 0;\n            Expr e1 = exprvar(56, \"var1\");\n            Expr e2 = exprmem(56, e1); \n            nb += _assert_simplify(shl(e1, exprcst(56, 3)), e1*exprcst(56, 8), s);\n            nb += _assert_simplify(shr(e1, exprcst(56, 4)), e1/exprcst(56, 16), s);\n            nb += _assert_simplify(exprcst(56, -1)*e1, -e1, s);\n            nb += _assert_simplify((~e1)+exprcst(56,1), -e1, s);\n            nb += _assert_simplify((~(-e1))+exprcst(56,1), e1, s);\n            nb += _assert_simplify(e1*(-e2), -(e2*e1), s);\n            nb += _assert_simplify((-e1)*e2, -(e2*e1), s);\n            return nb; \n        }\n        \n        unsigned int logical_properties(ExprSimplifier& s){\n            unsigned int nb = 0;\n            Expr e = exprvar(64, \"var1\");\n            nb += _assert_simplify(e&e, e, s);\n            nb += _assert_simplify(e|e, e, s);\n            nb += _assert_simplify(e&(~e), exprcst(64,0), 
s);\n            nb += _assert_simplify((~e)&e, exprcst(64,0), s);\n            nb += _assert_simplify((~e)^e, exprcst(64,-1), s);\n            nb += _assert_simplify(e^(~e), exprcst(64,-1), s);\n            nb += _assert_simplify((~e)|e, exprcst(64,-1), s);\n            nb += _assert_simplify(e|(~e), exprcst(64,-1), s);\n            nb += _assert_simplify(e^e, exprcst(64,0), s);\n            return nb; \n        }\n        \n        unsigned int concat_patterns(ExprSimplifier& s){\n            unsigned int nb = 0;\n            Expr e = exprvar(64, \"var1\");\n            Expr    v1 = exprvar(8, \"a\"),\n                    c1 = exprcst(24, 0x100c3);\n            Expr e1 = concat(v1, c1);\n            nb += _assert_simplify(concat(extract(e, 63,10), extract(e,9,0)), e, s);\n            nb += _assert_simplify(extract( concat(extract(e1, 31, 8), extract(e1, 7, 0)>>6), 7, 0) - 3,   (extract(e1, 7, 0)>>6)-3, s);\n            \n            e1 = exprcst(32, 0x00ffffff) & concat(v1, c1);\n            nb += _assert_simplify(e1, concat(exprcst(8, 0), c1), s);\n            \n            e1 = exprcst(32, 0xff000000) & concat(v1, c1);\n            nb += _assert_simplify(e1, concat(v1, exprcst(24, 0)), s);\n\n            e1 = exprcst(64, 0xffffffff) & concat(exprvar(32, \"blabla\"), exprcst(32, 0));\n            nb += _assert_simplify(e1, exprcst(64, 0), s);\n\n            e1 = exprcst(64, 0xffffffff00000000) & concat(exprcst(32, 0), exprvar(32, \"blu\"));\n            nb += _assert_simplify(e1, exprcst(64, 0), s);\n\n            return nb; \n        }\n        \n        unsigned int advanced(ExprSimplifier& s){\n            unsigned int nb = 0; \n            Expr    e1 = exprvar(32,\"varA\"),\n                    e2 = exprvar(32,\"varB\"),\n                    e3 = exprcst(32, -1), \n                    e4 = exprcst(32, 0xffff7),\n                    e5 = e3+e4, \n                    e6 = e4/e1,\n                    e7 = shr(e5,exprcst(32, 1)),\n                    e8 = 
exprmem(32, e3),\n                    e9 = concat(extract(e1, 31, 16), extract(e4, 15, 0));\n            \n            nb += _assert_simplify(((e1-e2)*e6)^e8, e8^((e1-e2+e2-e2)*(e6&e6)), s);\n            nb += _assert_simplify(e1+e2+e3-e1+e4-e2-e3, e4, s);\n            nb += _assert_simplify(e3*e4, exprcst(32, 0xfffffffffff00009), s);\n            nb += _assert_simplify(e4*e4, exprcst(32, 0xfffee00051), s);\n            nb += _assert_simplify(exprcst(32, 0xfffee00051)*e3, exprcst(32, 0xffffff00011fffaf), s);\n            nb += _assert_simplify(e4*(e3-e3+e4)*e3, e4*e3*e4, s);\n            nb += _assert_simplify(e3*e4*(e1+e2+e3-e1+e4-e2-e3), e4*e4*e3, s);\n            nb += _assert_simplify(e2/(e1+e1-e1), e2/e1, s);\n            nb += _assert_simplify(e8, e8+e5-e5, s);\n            nb += _assert_simplify((e6/e1/e8), (e6/(e8+e5-e5)/e1), s);\n            nb += _assert_simplify((e6/e7/e8), (e6/(e8+e5-e5)/e7), s);\n            nb += _assert_simplify(e9|e9, e9, s);\n            nb += _assert_simplify((e2&(~e1))&e1, exprcst(32,0), s);\n            //nb += _assert_simplify(extract(e8^(e9^~e8), 31, 0), e8&(-e6+(e9|e9)+e6) , s);\n            /*nb += _assert_simplify(, , s);\n            nb += _assert_simplify(, , s);\n            nb += _assert_simplify(, , s);\n            nb += _assert_simplify(, , s);*/\n            //nb += _assert_simplify(, , s);\n            return nb; \n        }\n    }\n}\n\nusing namespace test::simplification;\n// All unit tests \nvoid test_simplification(){\n    ExprSimplifier simp = ExprSimplifier();\n    simp.add(es_constant_folding);\n    simp.add(es_neutral_elements);\n    simp.add(es_absorbing_elements);\n    simp.add(es_arithmetic_properties);\n    simp.add(es_involution);\n    simp.add(es_extract_patterns);\n    simp.add(es_basic_transform);\n    simp.add(es_logical_properties);\n    simp.add(es_concat_patterns);\n    simp.add(es_arithmetic_factorize);\n    //simp.add(es_generic_distribute);\n    simp.add(es_generic_factorize);\n    
simp.add(es_deep_associative);\n    \n    unsigned int total = 0;\n    string green = \"\\033[1;32m\";\n    string def = \"\\033[0m\";\n    string bold = \"\\033[1m\";\n    \n    // Start testing \n    cout << bold << \"[\" << green << \"+\" << def << bold << \"]\" << def << \" Testing simplification module... \" << std::flush;\n    for( int i = 0; i < 1; i++){\n        total += basic(simp);\n        total += const_folding(simp);\n        total += neutral_elems(simp);\n        total += absorbing_elems(simp);\n        total += arithmetic_properties(simp);\n        total += involution(simp);\n        total += extract_patterns(simp);\n        total += basic_transform(simp);\n        total += logical_properties(simp);\n        total += concat_patterns(simp);\n        total += advanced(simp);\n    }\n\n    // Return res\n    cout << \"\\t\" << total << \"/\" << total << green << \"\\t\\tOK\" << def << endl;\n}\n"
  },
  {
    "path": "tests/test_strategy.cpp",
    "content": "#include \"strategy.hpp\"\n#include \"compiler.hpp\"\n#include \"il.hpp\"\n#include \"exception.hpp\"\n#include \"utils.hpp\"\n#include <cassert>\n#include <iostream>\n#include <string>\n#include <sstream>\n#include <iomanip>\n\nusing std::cout;\nusing std::endl; \nusing std::string;\n\nnamespace test{\n    namespace strategy{        \n\n        \n        unsigned int _assert(bool val, const string& msg){\n            if( !val){\n                cout << \"\\nFail: \" << msg << endl << std::flush; \n                throw test_exception();\n            }\n            return 1; \n        }\n        \n        unsigned int _assert_ropchain(ROPChain* ropchain, const string& msg){\n            if( ropchain == nullptr){\n                cout << \"\\nFail: \" << msg << endl << std::flush; \n                throw test_exception();\n            }\n            delete ropchain;\n            return 1; \n        }\n        \n        unsigned int basic(){\n            unsigned int nb = 0;\n            StrategyGraph sgraph;\n            node_t n1 = sgraph.new_node(GadgetType::MOV_REG);\n            node_t n2 = sgraph.new_node(GadgetType::MOV_REG);\n            Node& node1 = sgraph.nodes[n1];\n            Node& node2 = sgraph.nodes[n2];\n            node1.params[PARAM_MOVREG_SRC_REG].make_reg(X86_EAX);\n            node1.params[PARAM_MOVREG_DST_REG].make_reg(n2, PARAM_MOVREG_SRC_REG);\n            node2.params[PARAM_MOVREG_SRC_REG].make_reg(-1, false);\n            node2.params[PARAM_MOVREG_DST_REG].make_reg(X86_ECX);\n            sgraph.add_strategy_edge(n1, n2);\n            sgraph.add_param_edge(n1, n2);\n            //std::cout << sgraph;\n            \n            sgraph.rule_generic_transitivity(n1);\n            sgraph.compute_dfs_params();\n            sgraph.compute_dfs_strategy();\n            //std::cout << sgraph;\n            return nb;\n        }\n        \n        unsigned int rules(){\n            unsigned int nb = 0;\n            Arch* arch = new 
ArchX86();\n            GadgetDB db;\n            ROPChain* ropchain;\n\n            vector<RawGadget> raw;\n            raw.push_back(RawGadget(string(\"\\x89\\xf9\\xbb\\x01\\x00\\x00\\x00\\xc3\", 8), 1)); // mov ecx, edi; mov ebx, 1; ret\n            raw.push_back(RawGadget(string(\"\\x89\\xC8\\xC3\", 3), 2)); // mov eax, ecx; ret\n            raw.push_back(RawGadget(string(\"\\x89\\xC3\\xC3\", 3), 3)); // mov ebx, eax; ret\n            raw.push_back(RawGadget(string(\"\\x89\\xCB\\xC3\", 3), 4)); // mov ebx, ecx; ret\n            raw.push_back(RawGadget(string(\"\\xbb\\x04\\x00\\x00\\x00\\xc3\", 6), 5)); // mov ebx, 4; ret\n            raw.push_back(RawGadget(string(\"\\xb8\\x05\\x00\\x00\\x00\\xc3\", 6), 6)); // mov eax, 5; ret\n            raw.push_back(RawGadget(string(\"\\x89\\xc2\\xc3\", 3), 7)); // mov edx, eax; ret\n            raw.push_back(RawGadget(string(\"\\x5f\\x5e\\x59\\xc3\", 4), 8)); // pop edi; pop esi; pop ecx; ret\n            \n            raw.push_back(RawGadget(string(\"\\x89\\xF5\\xFF\\xE0\", 4), 9)); // mov ebp, esi; jmp eax\n            raw.push_back(RawGadget(string(\"\\xB8\\x09\\x00\\x00\\x00\\xC3\", 6), 10)); // mov eax, 9; ret\n            raw.push_back(RawGadget(string(\"\\x8B\\x4F\\x04\\xFF\\xE0\", 5), 11)); // mov ecx, [edi+4]; jmp eax\n            \n            db.analyse_raw_gadgets(raw, arch);\n            \n            // Test register transitivity\n            StrategyGraph sgraph;\n            node_t n1 = sgraph.new_node(GadgetType::MOV_REG);\n            node_t n2 = sgraph.new_node(GadgetType::MOV_REG);\n            Node& node1 = sgraph.nodes[n1];\n            Node& node2 = sgraph.nodes[n2];\n            node1.params[PARAM_MOVREG_SRC_REG].make_reg(X86_EDI);\n            node1.params[PARAM_MOVREG_DST_REG].make_reg(n2, PARAM_MOVREG_SRC_REG);\n            node2.params[PARAM_MOVREG_SRC_REG].make_reg(0, false);\n            node2.params[PARAM_MOVREG_DST_REG].make_reg(X86_EBX);\n            sgraph.add_strategy_edge(n1, n2);\n      
      sgraph.add_param_edge(n1, n2);\n            // Apply strat\n            sgraph.rule_generic_transitivity(n2);\n            sgraph.select_gadgets(db);\n            ropchain = sgraph.get_ropchain(arch);\n            nb += _assert_ropchain(ropchain, \"Basic application of strategy rule failed\");\n\n            // Test constant param resolving\n            StrategyGraph graph2;\n            n1 = graph2.new_node(GadgetType::MOV_CST);\n            n2 = graph2.new_node(GadgetType::MOV_CST);\n            Node& node1a = graph2.nodes[n1];\n            Node& node2a = graph2.nodes[n2];\n            node1a.params[PARAM_MOVCST_SRC_CST].make_cst(n2, PARAM_MOVCST_SRC_CST, exprvar(32, \"cst1\")+1, \"cst2\");\n            node1a.params[PARAM_MOVCST_DST_REG].make_reg(X86_EAX);\n            node2a.params[PARAM_MOVCST_SRC_CST].make_cst(0, \"cst1\", false); // free\n            node2a.params[PARAM_MOVCST_DST_REG].make_reg(-1, false); // free\n            graph2.add_strategy_edge(n1, n2);\n            graph2.add_param_edge(n1, n2);\n\n            graph2.select_gadgets(db);\n            ropchain = graph2.get_ropchain(arch);\n            nb += _assert_ropchain(ropchain, \"Basic application of strategy rule failed\");\n\n            // Test MovCst transitivity\n            StrategyGraph graph3;\n            n1 = graph3.new_node(GadgetType::MOV_CST);\n            Node& node1b = graph3.nodes[n1];\n            node1b.params[PARAM_MOVCST_SRC_CST].make_cst(5, \"cst_1\");\n            node1b.params[PARAM_MOVCST_DST_REG].make_reg(X86_EDX);\n            // Apply strat\n            graph3.rule_generic_transitivity(n1);\n            graph3.select_gadgets(db);\n            ropchain = graph3.get_ropchain(arch);\n            nb += _assert_ropchain(ropchain, \"Basic application of strategy rule failed\");\n            \n            // Test MovCst pop\n            StrategyGraph graph4;\n            n1 = graph4.new_node(GadgetType::MOV_CST);\n            Node& node1c = graph4.nodes[n1];\n            
node1c.params[PARAM_MOVCST_SRC_CST].make_cst(0x1234, \"cst_2\");\n            node1c.params[PARAM_MOVCST_DST_REG].make_reg(X86_ESI);\n            // Apply strat\n            graph4.rule_mov_cst_pop(n1, arch);\n            graph4.select_gadgets(db, nullptr, arch);\n            ropchain = graph4.get_ropchain(arch);\n            nb += _assert_ropchain(ropchain, \"Basic application of strategy rule failed\");\n\n            // Test generic adjust jmp\n            StrategyGraph graph5;\n            n1 = graph5.new_node(GadgetType::MOV_REG);\n            Node& node1d = graph5.nodes[n1];\n            node1d.params[PARAM_MOVREG_SRC_REG].make_reg(X86_ESI);\n            node1d.params[PARAM_MOVREG_DST_REG].make_reg(X86_EBP);\n            node1d.branch_type = BranchType::RET; // So the rule applies\n            // Apply strat\n            graph5.rule_generic_adjust_jmp(n1, arch);\n            graph5.select_gadgets(db, nullptr, arch);\n            ropchain = graph5.get_ropchain(arch);\n            nb += _assert_ropchain(ropchain, \"Basic application of strategy rule failed\");\n            \n            StrategyGraph graph6;\n            n1 = graph6.new_node(GadgetType::LOAD);\n            Node& node1e = graph6.nodes[n1];\n            node1e.params[PARAM_LOAD_SRC_ADDR_REG].make_reg(X86_EDI);\n            node1e.params[PARAM_LOAD_SRC_ADDR_OFFSET].make_cst(4, \"cstlalal\");\n            node1e.params[PARAM_LOAD_DST_REG].make_reg(X86_ECX);\n            node1e.branch_type = BranchType::RET; // So the rule applies\n            // Apply strat\n            graph6.rule_generic_adjust_jmp(n1, arch);\n            graph6.select_gadgets(db, nullptr, arch);\n            ropchain = graph6.get_ropchain(arch);\n            nb += _assert_ropchain(ropchain, \"Basic application of strategy rule failed\");\n\n            delete arch;\n            return nb;\n        }\n        \n        \n        unsigned int test_generic_adjust_jmp(){\n            unsigned int nb = 0;\n            Arch* arch = 
new ArchX86();\n            GadgetDB db;\n            ROPChain* ropchain;\n\n            vector<RawGadget> raw;\n            raw.push_back(RawGadget(string(\"\\x89\\xC8\\xC3\", 3), 1)); // mov eax, ecx; ret\n            raw.push_back(RawGadget(string(\"\\xc3\", 1), 2)); // ret\n            raw.push_back(RawGadget(string(\"\\x89\\xF1\\xFF\\xE0\", 4), 3)); // mov ecx, esi; jmp eax\n            raw.push_back(RawGadget(string(\"\\x5A\\x59\\xC3\", 3), 4)); // pop edx; pop ecx; ret\n            db.analyse_raw_gadgets(raw, arch);\n\n            // Test on more advanced example (eax = esi)\n            StrategyGraph sgraph;\n            node_t n1 = sgraph.new_node(GadgetType::MOV_REG);\n            Node& node1 = sgraph.nodes[n1];\n            node1.params[PARAM_MOVREG_DST_REG].make_reg(X86_EAX);\n            node1.params[PARAM_MOVREG_SRC_REG].make_reg(X86_ESI);\n            // Apply strat\n            sgraph.rule_generic_transitivity(n1);\n            sgraph.rule_generic_adjust_jmp(1, arch);\n            sgraph.rule_generic_transitivity(3);\n            sgraph.rule_mov_cst_pop(5, arch);\n            sgraph.select_gadgets(db, nullptr, arch);\n            ropchain = sgraph.get_ropchain(arch);\n            nb += _assert_ropchain(ropchain, \"Basic application of strategy rule failed\");\n\n            delete arch;\n            return nb;\n        }\n        \n        \n        unsigned int test_adjust_load(){\n            unsigned int nb = 0;\n            Arch* arch = new ArchX86();\n            GadgetDB db;\n            ROPChain* ropchain;\n\n            vector<RawGadget> raw;\n            raw.push_back(RawGadget(string(\"\\x8B\\x41\\x08\\xC3\", 4), 1)); // mov eax, [ecx + 8]; ret\n            raw.push_back(RawGadget(string(\"\\x8D\\x4B\\x08\\xC3\", 4), 2)); // lea ecx, [ebx + 8]; ret\n\n            raw.push_back(RawGadget(string(\"\\x23\\x56\\xF8\\xC3\", 4), 3)); // and edx, [esi - 8]; ret\n            raw.push_back(RawGadget(string(\"\\x8D\\x77\\x10\\xC3\", 4), 4)); // lea 
esi, [edi + 16]; ret\n            db.analyse_raw_gadgets(raw, arch);\n\n            // Test adjust load on LOAD type\n            StrategyGraph sgraph;\n            node_t n1 = sgraph.new_node(GadgetType::LOAD);\n            Node& node1 = sgraph.nodes[n1];\n            node1.params[PARAM_LOAD_DST_REG].make_reg(X86_EAX);\n            node1.params[PARAM_LOAD_SRC_ADDR_REG].make_reg(X86_EBX);\n            node1.params[PARAM_LOAD_SRC_ADDR_OFFSET].make_cst(0x10, \"cst_0\");\n            // Apply strat\n            sgraph.rule_adjust_load(n1, arch);\n            sgraph.select_gadgets(db, nullptr, arch);\n            ropchain = sgraph.get_ropchain(arch);\n            nb += _assert_ropchain(ropchain, \"Basic application of strategy rule failed\");\n\n            // Test adjust load on ALOAD type\n            StrategyGraph sgraph2;\n            node_t n2 = sgraph2.new_node(GadgetType::ALOAD);\n            Node& node2 = sgraph2.nodes[n2];\n            node2.params[PARAM_ALOAD_DST_REG].make_reg(X86_EDX);\n            node2.params[PARAM_ALOAD_OP].make_op(Op::AND);\n            node2.params[PARAM_ALOAD_SRC_ADDR_REG].make_reg(X86_EDI);\n            node2.params[PARAM_ALOAD_SRC_ADDR_OFFSET].make_cst(8, \"cst_0\");\n            // Apply strat\n            sgraph2.rule_adjust_load(n2, arch);\n            sgraph2.select_gadgets(db, nullptr, arch);\n            ropchain = sgraph2.get_ropchain(arch);\n            nb += _assert_ropchain(ropchain, \"Basic application of strategy rule failed\");\n            delete arch;\n            return nb;\n        }\n        \n        unsigned int test_generic_src_transitivity(){\n            unsigned int nb = 0;\n            Arch* arch = new ArchX86();\n            GadgetDB db;\n            ROPChain* ropchain;\n\n            vector<RawGadget> raw;\n            raw.push_back(RawGadget(string(\"\\x89\\x51\\xEC\\xC3\", 4), 1)); // mov [ecx - 20], edx; ret\n            raw.push_back(RawGadget(string(\"\\x89\\xC2\\xC3\", 3), 2)); // mov edx, eax; ret\n  
          raw.push_back(RawGadget(string(\"\\x31\\x51\\xEC\\xC3\", 4), 3)); // xor [ecx - 20], edx; ret\n            db.analyse_raw_gadgets(raw, arch);\n\n            // Test src transitivity on STORE\n            StrategyGraph sgraph;\n            node_t n1 = sgraph.new_node(GadgetType::STORE);\n            Node& node1 = sgraph.nodes[n1];\n            node1.params[PARAM_STORE_DST_ADDR_REG].make_reg(X86_ECX);\n            node1.params[PARAM_STORE_DST_ADDR_OFFSET].make_cst(-20, \"cst_0\");\n            node1.params[PARAM_STORE_SRC_REG].make_reg(X86_EAX);\n            // Apply strat\n            sgraph.rule_generic_src_transitivity(n1);\n            sgraph.select_gadgets(db, nullptr, arch);\n            ropchain = sgraph.get_ropchain(arch);\n            nb += _assert_ropchain(ropchain, \"Basic application of strategy rule failed\");\n            \n            // Test src transitivity on ASTORE\n            StrategyGraph sgraph2;\n            node_t n2 = sgraph2.new_node(GadgetType::ASTORE);\n            Node& node2 = sgraph2.nodes[n2];\n            node2.params[PARAM_ASTORE_DST_ADDR_REG].make_reg(X86_ECX);\n            node2.params[PARAM_ASTORE_DST_ADDR_OFFSET].make_cst(-20, \"cst_0\");\n            node2.params[PARAM_ASTORE_SRC_REG].make_reg(X86_EAX);\n            node2.params[PARAM_ASTORE_OP].make_op(Op::XOR);\n            // Apply strat\n            sgraph2.rule_generic_src_transitivity(n2);\n            sgraph2.select_gadgets(db, nullptr, arch);\n            ropchain = sgraph2.get_ropchain(arch);\n            nb += _assert_ropchain(ropchain, \"Basic application of strategy rule failed\");\n\n            return nb;\n        }\n        \n        unsigned int test_adjust_store(){\n            unsigned int nb = 0;\n            Arch* arch = new ArchX86();\n            GadgetDB db;\n            ROPChain* ropchain;\n\n            vector<RawGadget> raw;\n            raw.push_back(RawGadget(string(\"\\x89\\x41\\x08\\xC3\", 4), 1)); // mov [ecx + 8], eax; ret\n            
raw.push_back(RawGadget(string(\"\\x8D\\x4B\\x08\\xC3\", 4), 2)); // lea ecx, [ebx + 8]; ret\n\n            raw.push_back(RawGadget(string(\"\\x21\\x56\\xF8\\xC3\", 4), 3)); // and [esi - 8], edx; ret\n            raw.push_back(RawGadget(string(\"\\x8D\\x77\\x10\\xC3\", 4), 4)); // lea esi, [edi + 16]; ret\n            \n            raw.push_back(RawGadget(string(\"\\x89\\xC8\\xC3\", 3), 5)); // mov eax, ecx; ret\n            raw.push_back(RawGadget(string(\"\\x89\\xC3\\xC3\", 3), 6)); // mov ebx, eax; ret\n            raw.push_back(RawGadget(string(\"\\x89\\x43\\x08\\xC3\", 4), 7)); // mov [ebx + 8], eax; ret\n\n            db.analyse_raw_gadgets(raw, arch);\n\n            // Test adjust store on STORE type\n            StrategyGraph sgraph;\n            node_t n1 = sgraph.new_node(GadgetType::STORE);\n            Node& node1 = sgraph.nodes[n1];\n            node1.params[PARAM_STORE_DST_ADDR_REG].make_reg(X86_EBX);\n            node1.params[PARAM_STORE_DST_ADDR_OFFSET].make_cst(16, \"cst_0\");\n            node1.params[PARAM_STORE_SRC_REG].make_reg(X86_EAX);\n            // Apply strat\n            sgraph.rule_adjust_store(n1, arch);\n            sgraph.select_gadgets(db, nullptr, arch);\n            ropchain = sgraph.get_ropchain(arch);\n            nb += _assert_ropchain(ropchain, \"Basic application of strategy rule failed\");\n\n            // Test adjust store on ASTORE type\n            StrategyGraph sgraph2;\n            node_t n2 = sgraph2.new_node(GadgetType::ASTORE);\n            Node& node2 = sgraph2.nodes[n2];\n            node2.params[PARAM_ASTORE_DST_ADDR_REG].make_reg(X86_EDI);\n            node2.params[PARAM_ASTORE_DST_ADDR_OFFSET].make_cst(8, \"cst_0\");\n            node2.params[PARAM_ASTORE_SRC_REG].make_reg(X86_EDX);\n            node2.params[PARAM_ASTORE_OP].make_op(Op::AND);\n            // Apply strat\n            sgraph2.rule_adjust_store(n2, arch);\n            sgraph2.select_gadgets(db, nullptr, arch);\n            ropchain = 
sgraph2.get_ropchain(arch);\n            nb += _assert_ropchain(ropchain, \"Basic application of strategy rule failed\");\n\n            // Another adjust store on STORE mixed with src_transitivity\n            StrategyGraph sgraph3;\n            node_t n3 = sgraph3.new_node(GadgetType::STORE);\n            Node& node3 = sgraph3.nodes[n3];\n            node3.params[PARAM_STORE_DST_ADDR_REG].make_reg(X86_EAX);\n            node3.params[PARAM_STORE_DST_ADDR_OFFSET].make_cst(8, \"cst_0\");\n            node3.params[PARAM_STORE_SRC_REG].make_reg(X86_ECX);\n            // Apply strat\n            sgraph3.rule_adjust_store(n3, arch);\n            sgraph3.rule_generic_src_transitivity(2);\n\n            sgraph3.select_gadgets(db, nullptr, arch);\n            ropchain = sgraph3.get_ropchain(arch);\n            nb += _assert_ropchain(ropchain, \"Basic application of strategy rules failed\");\n\n            delete arch;\n            return nb;\n        }\n        \n        unsigned int test_cst_pop(){\n            unsigned int nb = 0;\n            /*\n            Arch* arch = new ArchX86();\n            GadgetDB db;\n            ROPChain* ropchain;\n            \n            vector<RawGadget> raw;\n            raw.push_back(RawGadget(string(\"\\x59\\x58\\x5B\\xFF\\xE0\", 5), 1)); // pop ecx; pop eax; pop ebx; jmp eax\n            raw.push_back(RawGadget(string(\"\\xC3\", 1), 2)); // ret\n            db.analyse_raw_gadgets(raw, arch);\n\n            // Test cst_pop on a function call strategy graph \n            vector<StrategyGraph*> graphs;\n            ROPCompiler* comp = new ROPCompiler(arch, &db);\n            vector<ILInstruction> instrs = comp->parse(\"0x1234()\");\n            comp->il_to_strategy(graphs, instrs[0], NULL, ABI::X86_CDECL);\n\n            // Apply strat\n            graphs[0]->rule_mov_cst_pop(1, arch);\n            \n            graphs[0]->select_gadgets(db);\n            ropchain = graphs[0]->get_ropchain(arch);\n            // nb += 
_assert_ropchain(ropchain, \"Basic application of strategy rule failed\");\n\n            delete arch;\n            delete comp;\n            */\n            return nb;\n        }\n        \n        // Buggy X64 syscall...\n        unsigned int test_x64_syscall(){\n            unsigned int nb = 0;\n            /*  DOESN'T WORK ANYMORE WITH NEW COMPILER MECHANISM\n            \n            Arch* arch = new ArchX64();\n            GadgetDB db;\n            ROPChain* ropchain = nullptr;\n            \n            vector<RawGadget> raw;\n            raw.push_back(RawGadget(string(\"\\x58\\xC3\", 2), 1)); // pop rax; ret\n            raw.push_back(RawGadget(string(\"\\x5F\\xC3\", 2), 2)); // pop rdi; ret\n            raw.push_back(RawGadget(string(\"\\x83\\xC5\\x20\\x0F\\x05\", 5), 3)); // add ebp, 32; syscall\n            raw.push_back(RawGadget(string(\"\\x5E\\xC3\", 2), 4)); // pop rsi; ret\n            raw.push_back(RawGadget(string(\"\\x59\\xC3\", 2), 5)); // pop rcx; ret\n            raw.push_back(RawGadget(string(\"\\x41\\x5F\\xC3\", 3), 6)); // pop r15; ret\n            raw.push_back(RawGadget(string(\"\\x48\\x89\\xC2\\x41\\xFF\\xD7\", 6), 7)); // mov rdx, rax; call r15\n            db.analyse_raw_gadgets(raw, arch);\n            \n\n            // Test cst_pop on a function call strategy graph \n            vector<StrategyGraph*> graphs;\n            ROPCompiler* comp = new ROPCompiler(arch, &db);\n            string query = \"sys_11(1,2,3)\";\n            vector<ILInstruction> instrs = comp->parse(query);\n            comp->il_to_strategy(graphs, instrs[0], nullptr, ABI::NONE, System::LINUX);\n\n            // Apply strat\n            graphs[0]->rule_mov_cst_pop(1, arch);\n            graphs[0]->rule_mov_cst_pop(2, arch);\n            // Adjust rdx \n            graphs[0]->rule_generic_adjust_jmp(3, arch);\n            graphs[0]->rule_mov_cst_pop(7, arch);\n            graphs[0]->rule_generic_transitivity(3);\n            graphs[0]->rule_mov_cst_pop(10, arch);\n  
          // Adjust eax\n            graphs[0]->rule_mov_cst_pop(4, arch);\n\n            graphs[0]->select_gadgets(db, nullptr, arch);\n            ropchain = graphs[0]->get_ropchain(arch);\n            nb += _assert_ropchain(ropchain, \"Applications of rules to get syscall ropchain failed\");\n\n            for( auto g : graphs ){\n                delete g;\n            }\n            \n            delete arch;\n            delete comp;\n            \n            */\n\n            \n            \n            return nb;\n        }\n\n    }\n}\n\nusing namespace test::strategy;\n// All unit tests \nvoid test_strategy(){    \n    unsigned int total = 0;\n    string green = \"\\033[1;32m\";\n    string def = \"\\033[0m\";\n    string bold = \"\\033[1m\";\n\n    // Start testing \n    cout << bold << \"[\" << green << \"+\" << def << bold << \"]\" << def << \" Testing strategy graphs... \" << std::flush;\n    for( int i = 0; i < 1; i++){\n        total += basic();\n        total += rules();\n        total += test_generic_adjust_jmp();\n        total += test_adjust_load();\n        total += test_generic_src_transitivity();\n        total += test_adjust_store();\n        total += test_cst_pop();\n        total += test_x64_syscall();\n    }\n\n    // Return res\n    cout << \"\\t\\t\" << total << \"/\" << total << green << \"\\t\\tOK\" << def << endl;\n}\n"
  }
]