[
  {
    "path": ".git-blame-ignore-revs",
    "content": "# Black + pre-commit\n23503e79193a3cff5d6f1c92f22349fd2227d936 # Black\ncd758543f17a2253b5a0630327eac0ad6780217a # Trailing whitespace, pyupgrade, prefer builtin constructors\ndfd137fc8d3073ff065347401f528c1eaf62c383 # ruff\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug-report.yml",
    "content": "name: Report a bug\ndescription: Report a bug in pyvex\nlabels: [bug,needs-triage]\nbody:\n  - type: markdown\n    attributes:\n      value: |\n        Thank you for taking the time to submit this bug report!\n\n        Before submitting this bug report, please check the following, which may resolve your issue:\n        * Have you checked that you are running the latest versions of angr and its components? angr is rapidly-evolving!\n        * Have you [searched existing issues](https://github.com/angr/pyvex/issues?q=is%3Aopen+is%3Aissue+label%3Abug) to see if this bug has been reported before?\n        * Have you checked the [documentation](https://docs.angr.io/)?\n        * Have you checked the [FAQ](https://docs.angr.io/introductory-errata/faq)?\n\n        **Important:** If this bug is a security vulnerability, please submit it privately. See our [security policy](https://github.com/angr/angr/blob/master/SECURITY.md) for more details.\n\n        Please note: The angr suite is maintained by a small team. While we cannot guarantee any timeliness for fixes and enhancements, we will do our best. For more real-time help with angr, from us and the community, join our [Slack](https://angr.io/invite/).\n\n  - type: textarea\n    attributes:\n      label: Description\n      description: Brief description of the bug, with any relevant log messages.\n    validations:\n      required: true\n\n  - type: textarea\n    attributes:\n      label: Steps to reproduce the bug\n      description: |\n        If appropriate, include both a **script to reproduce the bug**, and if possible **attach the binary used**.\n\n        **Tip:** You can attach files to the issue by first clicking on the textarea to select it, then dragging & dropping the file onto the textarea.\n  - type: textarea\n    attributes:\n      label: Environment\n      description: Many common issues are caused by problems with the local Python environment. Before submitting, double-check that your versions of all modules in the angr suite (angr, cle, pyvex, ...) are up to date and include the output of `python -m angr.misc.bug_report` here.\n\n  - type: textarea\n    attributes:\n      label: Additional context\n      description: Any additional context about the problem.\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/config.yml",
    "content": "blank_issues_enabled: false\ncontact_links:\n  - name: Join our Slack community\n    url: https://angr.io/invite/\n    about: For questions and help with angr, you are invited to join the angr Slack community\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature-request.yml",
    "content": "name: Request a feature\ndescription: Request a new feature for pyvex\nlabels: [enhancement,needs-triage]\nbody:\n  - type: markdown\n    attributes:\n      value: |\n        Thank you for taking the time to submit this feature request!\n\n        Before submitting this feature request, please check the following:\n        * Have you checked that you are running the latest versions of angr and its components? angr is rapidly-evolving!\n        * Have you checked the [documentation](https://docs.angr.io/) to see if this feature exists already?\n        * Have you [searched existing issues](https://github.com/angr/pyvex/issues?q=is%3Aissue+label%3Aenhancement+) to see if this feature has been requested before?\n\n        Please note: The angr suite is maintained by a small team. While we cannot guarantee any timeliness for fixes and enhancements, we will do our best. For more real-time help with angr, from us and the community, join our [Slack](https://angr.io/invite/).\n\n  - type: textarea\n    attributes:\n      label: Description\n      description: |\n        Brief description of the desired feature. If the feature is intended to solve some problem, please clearly describe the problem, including any relevant binaries, etc.\n\n        **Tip:** You can attach files to the issue by first clicking on the textarea to select it, then dragging & dropping the file onto the textarea.\n    validations:\n      required: true\n\n  - type: textarea\n    attributes:\n      label: Alternatives\n      description: Possible alternative solutions or features that you have considered.\n\n  - type: textarea\n    attributes:\n      label: Additional context\n      description: Any other context or screenshots about the feature request.\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/question.yml",
    "content": "name: Ask a question\ndescription: Ask a question about pyvex\nlabels: [question,needs-triage]\nbody:\n  - type: markdown\n    attributes:\n      value: |\n        If you have a question about pyvex, that is not a bug report or a feature request, you can ask it here. For more real-time help with pyvex, from us and the community, join our [Slack](https://angr.io/invite/).\n\n        Before submitting this question, please check the following, which may answer your question:\n        * Have you checked the [documentation](https://docs.angr.io/)?\n        * Have you checked the [FAQ](https://docs.angr.io/introductory-errata/faq)?\n        * Have you checked our library of [examples](https://github.com/angr/angr-doc/tree/master/examples)?\n        * Have you [searched existing issues](https://github.com/angr/pyvex/issues?q=is%3Aissue+label%3Aquestion) to see if this question has been answered before?\n        * Have you checked that you are running the latest versions of angr and its components. angr is rapidly-evolving!\n\n        Please note: The angr suite is maintained by a small team. While we cannot guarantee any timeliness for fixes and enhancements, we will do our best.\n\n  - type: textarea\n    attributes:\n      label: Question\n      description:\n    validations:\n      required: true\n"
  },
  {
    "path": ".github/workflows/ci.yml",
    "content": "name: CI\n\non:\n  push:\n    branches:\n      - master\n  pull_request:\n  workflow_dispatch:\n\njobs:\n  ecosystem:\n    uses: angr/ci-settings/.github/workflows/angr-ci.yml@master\n\n  test:\n    name: Test\n    strategy:\n      matrix:\n        os: [windows-2022, macos-15-intel, macos-15, ubuntu-24.04]\n      fail-fast: false\n    runs-on: ${{ matrix.os }}\n    steps:\n      - name: Checkout\n        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4\n        with:\n          submodules: 'recursive'\n      - name: Activate msvc\n        uses: ilammy/msvc-dev-cmd@0b201ec74fa43914dc39ae48a89fd1d8cb592756 # v1\n        if: startsWith(runner.os, 'windows')\n      - name: Setup uv\n        uses: astral-sh/setup-uv@445689ea25e0de0a23313031f5fe577c74ae45a1 # v6\n      - name: Sync dependencies\n        run: uv sync -p 3.10\n      - name: Run tests\n        run: uv run pytest tests\n\n"
  },
  {
    "path": ".github/workflows/cifuzz.yml",
    "content": "name: OSS-Fuzz\n\non:\n  # push:\n  #   branches:\n  #     - master\n  # pull_request:\n  workflow_dispatch:\n\npermissions: {}\n\njobs:\n Fuzzing:\n   runs-on: ubuntu-latest\n   permissions:\n     security-events: write\n   steps:\n   - name: Build Fuzzers\n     id: build\n     uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master\n     with:\n       oss-fuzz-project-name: 'pyvex'\n       language: python\n   - name: Run Fuzzers\n     uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master\n     with:\n       oss-fuzz-project-name: 'pyvex'\n       language: python\n       fuzz-seconds: 600\n       output-sarif: true\n   - name: Upload Crash\n     uses: actions/upload-artifact@v3\n     if: failure() && steps.build.outcome == 'success'\n     with:\n       name: artifacts\n       path: ./out/artifacts\n   - name: Upload Sarif\n     if: always() && steps.build.outcome == 'success'\n     uses: github/codeql-action/upload-sarif@v2\n     with:\n      # Path to SARIF file relative to the root of the repository\n      sarif_file: cifuzz-sarif/results.sarif\n      checkout_path: cifuzz-sarif\n"
  },
  {
    "path": ".github/workflows/nightly-ci.yml",
    "content": "name: Nightly CI\n\non:\n  schedule:\n  - cron: \"0 0 * * *\"\n  workflow_dispatch:\n\njobs:\n  ci:\n    uses: angr/ci-settings/.github/workflows/angr-ci.yml@master\n    with:\n      nightly: true\n    secrets: inherit\n"
  },
  {
    "path": ".gitignore",
    "content": "build\ndist\nMANIFEST\npyvex_python\nvex_ffi.py\nlibpyvex.so\n*.egg-info\n*.eggs\n*.pyc\n*.swp\n*.obj\n*.lib\n*.dll\n*.exp\n*.o\n*.a\n*.dylib\npyvex/lib\npyvex/include\nvex-master\nvex-master.tar.gz\ndocs/_build\nscikit_build\nuv.lock\n"
  },
  {
    "path": ".gitmodules",
    "content": "[submodule \"vex\"]\n\tpath = vex\n\turl = https://github.com/angr/vex.git\n\tbranch = master\n"
  },
  {
    "path": ".pre-commit-config.yaml",
    "content": "repos:\n\n#\n# Fail fast\n#\n\n-   repo: https://github.com/abravalheri/validate-pyproject\n    rev: v0.25\n    hooks:\n    - id: validate-pyproject\n      fail_fast: true\n\n-   repo: https://github.com/pre-commit/pre-commit-hooks\n    rev: v6.0.0\n    hooks:\n    # General\n    -   id: check-merge-conflict\n        fail_fast: true\n    -   id: check-case-conflict\n        fail_fast: true\n    -   id: destroyed-symlinks\n        fail_fast: true\n    -   id: check-symlinks\n        fail_fast: true\n    -   id: check-added-large-files\n        fail_fast: true\n    # Syntax\n    -   id: check-toml\n        fail_fast: true\n    -   id: check-json\n        fail_fast: true\n    -   id: check-yaml\n        fail_fast: true\n\n-   repo: https://github.com/pre-commit/pre-commit-hooks\n    rev: v6.0.0\n    hooks:\n    -   id: check-ast\n        fail_fast: true\n\n#\n# Modifiers\n#\n\n-   repo: https://github.com/pre-commit/pre-commit-hooks\n    rev: v6.0.0\n    hooks:\n    -   id: mixed-line-ending\n    -   id: trailing-whitespace\n\n-   repo: https://github.com/dannysepler/rm_unneeded_f_str\n    rev: v0.2.0\n    hooks:\n    -   id: rm-unneeded-f-str\n\n-   repo: https://github.com/asottile/pyupgrade\n    rev: v3.21.2\n    hooks:\n    -   id: pyupgrade\n        args: [--py310-plus]\n\n-   repo: https://github.com/astral-sh/ruff-pre-commit\n    rev: v0.15.10\n    hooks:\n    - id: ruff\n      args: [--fix, --exit-non-zero-on-fix]\n\n# Last modifier: Coding Standard\n-   repo: https://github.com/psf/black-pre-commit-mirror\n    rev: 26.3.1\n    hooks:\n    -   id: black\n\n#\n# Static Checks\n#\n\n-   repo: https://github.com/pre-commit/pygrep-hooks\n    rev: v1.10.0\n    hooks:\n    # Python\n    -   id: python-use-type-annotations\n    -   id: python-no-log-warn\n    # Documentation\n    -   id: rst-backticks\n    -   id: rst-directive-colons\n    -   id: rst-inline-touching-normal\n\n-   repo: https://github.com/pre-commit/pre-commit-hooks\n    rev: v6.0.0\n    hooks:\n    -   id: debug-statements\n    -   id: check-builtin-literals\n    -   id: check-docstring-first\n"
  },
  {
    "path": ".readthedocs.yml",
    "content": "# Read the Docs configuration file\n# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details\n\nversion: 2\n\nsphinx:\n  configuration: docs/conf.py\n\nsubmodules:\n  include: all\n\nbuild:\n  os: ubuntu-22.04\n  tools:\n    python: \"3.10\"\n  jobs:\n    pre_install:\n      - pip install -U pip\n      - pip install git+https://github.com/angr/archinfo.git\n\npython:\n  install:\n    - method: pip\n      path: .\n      extra_requirements:\n        - docs\n"
  },
  {
    "path": "CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.15)\nset(CMAKE_POSITION_INDEPENDENT_CODE ON)\n\nproject(pyvex LANGUAGES C)\n\n# Set the output directory for built libraries\nset(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/pyvex/lib)\n\n# Set the C standard to C99\nset(CMAKE_C_STANDARD 99)\n\n# Include directories\ninclude_directories(\n    ${CMAKE_SOURCE_DIR}/pyvex/include\n    ${CMAKE_SOURCE_DIR}/pyvex_c\n    ${CMAKE_SOURCE_DIR}/vex/pub\n)\n\n# Source files for the pyvex C library\nset(PYVEX_SRC\n    pyvex_c/pyvex.c\n    pyvex_c/analysis.c\n    pyvex_c/logging.c\n    pyvex_c/postprocess.c\n)\n\n# Source files for the VEX C library\nset(VEX_SRC\n\tvex/priv/ir_defs.c\n\tvex/priv/ir_match.c\n\tvex/priv/ir_opt.c\n\tvex/priv/ir_inject.c\n\tvex/priv/main_globals.c\n\tvex/priv/main_util.c\n\tvex/priv/s390_disasm.c\n\tvex/priv/host_x86_defs.c\n\tvex/priv/host_amd64_defs.c\n\tvex/priv/host_arm_defs.c\n\tvex/priv/host_arm64_defs.c\n\tvex/priv/host_ppc_defs.c\n\tvex/priv/host_riscv64_defs.c\n\tvex/priv/host_s390_defs.c\n\tvex/priv/host_mips_defs.c\n\tvex/priv/host_x86_isel.c\n\tvex/priv/host_amd64_isel.c\n\tvex/priv/host_arm_isel.c\n\tvex/priv/host_arm64_isel.c\n\tvex/priv/host_ppc_isel.c\n\tvex/priv/host_riscv64_isel.c\n\tvex/priv/host_s390_isel.c\n\tvex/priv/host_mips_isel.c\n\tvex/priv/host_generic_maddf.c\n\tvex/priv/host_generic_regs.c\n\tvex/priv/host_generic_simd64.c\n\tvex/priv/host_generic_simd128.c\n\tvex/priv/host_generic_simd256.c\n\tvex/priv/host_generic_reg_alloc2.c\n\tvex/priv/host_generic_reg_alloc3.c\n\tvex/priv/guest_generic_x87.c\n\tvex/priv/guest_generic_bb_to_IR.c\n\tvex/priv/guest_x86_helpers.c\n\tvex/priv/guest_amd64_helpers.c\n\tvex/priv/guest_arm_helpers.c\n\tvex/priv/guest_arm64_helpers.c\n\tvex/priv/guest_ppc_helpers.c\n\tvex/priv/guest_riscv64_helpers.c\n\tvex/priv/guest_s390_helpers.c\n\tvex/priv/guest_mips_helpers.c\n\tvex/priv/guest_x86_toIR.c\n\tvex/priv/guest_amd64_toIR.c\n\tvex/priv/guest_arm_toIR.c\n\tvex/priv/guest_arm64_toIR.c\n\tvex/priv/guest_ppc_toIR.c\n\tvex/priv/guest_riscv64_toIR.c\n\tvex/priv/guest_s390_toIR.c\n\tvex/priv/guest_mips_toIR.c\n    vex/priv/multiarch_main_main.c\n)\n\n# Build the VEX static library\nadd_library(vex STATIC ${VEX_SRC})\ntarget_compile_definitions(vex PRIVATE PYVEX)\ntarget_include_directories(vex PUBLIC ${CMAKE_SOURCE_DIR}/vex/pub)\n\n# Build the shared library\nadd_library(pyvex SHARED ${PYVEX_SRC})\nset_target_properties(pyvex PROPERTIES OUTPUT_NAME \"pyvex\")\n\n# Handle .def file for Windows builds\nif (WIN32)\n    set_target_properties(pyvex PROPERTIES LINK_FLAGS \"/DEF:${CMAKE_SOURCE_DIR}/pyvex_c/pyvex.def\")\nendif()\n\ntarget_include_directories(pyvex PRIVATE pyvex_c)\n\ntarget_link_libraries(pyvex PRIVATE vex)\n\n# Install the built library to the Python package\n# It is installed twice to handle both editable and non-editable installs\ninstall(TARGETS pyvex DESTINATION ${CMAKE_SOURCE_DIR}/pyvex/lib)\ninstall(TARGETS pyvex DESTINATION pyvex/lib)\n\n# --- BEGIN: Generate pub/libvex_guest_offsets.h ---\nadd_executable(genoffsets vex/auxprogs/genoffsets.c)\nset_target_properties(genoffsets PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/vex/auxprogs)\n\nadd_custom_command(\n    OUTPUT ${CMAKE_SOURCE_DIR}/vex/pub/libvex_guest_offsets.h\n    COMMAND $<TARGET_FILE:genoffsets> > ${CMAKE_SOURCE_DIR}/vex/pub/libvex_guest_offsets.h\n    DEPENDS genoffsets\n    WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}\n    COMMENT \"Generating pub/libvex_guest_offsets.h\"\n)\n\nadd_custom_target(generate_offsets_header\n    DEPENDS ${CMAKE_SOURCE_DIR}/vex/pub/libvex_guest_offsets.h\n)\ninstall(\n    FILES ${CMAKE_SOURCE_DIR}/vex/pub/libvex_guest_offsets.h\n    DESTINATION pyvex/include\n)\n\nadd_dependencies(vex generate_offsets_header)\n# --- END: Generate pub/libvex_guest_offsets.h ---\n\n# --- BEGIN: Generate pyvex/vex_ffi.py ---\nadd_custom_command(\n    OUTPUT ${CMAKE_SOURCE_DIR}/pyvex/vex_ffi.py\n    COMMAND ${CMAKE_COMMAND} -E env\n        ${Python3_EXECUTABLE} ${CMAKE_SOURCE_DIR}/make_ffi.py ${CMAKE_SOURCE_DIR}/vex/pub\n    DEPENDS ${CMAKE_SOURCE_DIR}/vex/pub/libvex_guest_offsets.h\n    WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}\n    COMMENT \"Generating pyvex/vex_ffi.py using make_ffi.py\"\n)\n\nadd_custom_target(generate_vex_ffi_py\n    DEPENDS ${CMAKE_SOURCE_DIR}/pyvex/vex_ffi.py\n)\ninstall(\n    FILES ${CMAKE_SOURCE_DIR}/pyvex/vex_ffi.py\n    DESTINATION pyvex\n)\nadd_dependencies(pyvex generate_vex_ffi_py)\n# --- END: Generate pyvex/vex_ffi.py ---\n\n# --- BEGIN: Copy headers to pyvex/include ---\nadd_custom_command(\n    OUTPUT ${CMAKE_SOURCE_DIR}/pyvex/include/pub\n    COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/vex/pub ${CMAKE_SOURCE_DIR}/pyvex/include/\n    DEPENDS ${CMAKE_SOURCE_DIR}/vex/pub\n    COMMENT \"Copying vex/pub to pyvex/include/\"\n)\nadd_custom_command(\n    OUTPUT ${CMAKE_SOURCE_DIR}/pyvex/include/pyvex.h\n    COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/pyvex_c/pyvex.h ${CMAKE_SOURCE_DIR}/pyvex/include/pyvex.h\n    DEPENDS ${CMAKE_SOURCE_DIR}/pyvex_c/pyvex.h\n    COMMENT \"Copying pyvex_c/pyvex.h to pyvex/include/\"\n)\nadd_custom_target(copy_headers ALL\n    DEPENDS ${CMAKE_SOURCE_DIR}/pyvex/include/pub ${CMAKE_SOURCE_DIR}/pyvex/include/pyvex.h\n)\nadd_dependencies(pyvex copy_headers)\nadd_dependencies(copy_headers generate_offsets_header)\n# --- END: Copy headers to pyvex/include ---\n"
  },
  {
    "path": "LICENSE",
    "content": "Copyright (c) 2015, The Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n"
  },
  {
    "path": "MANIFEST.in",
    "content": "include LICENSE\ninclude README.md\ninclude make_ffi.py\nrecursive-include pyvex_c *.c *.h *.def Makefile Makefile-msvc LICENSE\nrecursive-include vex *\n"
  },
  {
    "path": "README.md",
    "content": "# PyVEX\n[![Latest Release](https://img.shields.io/pypi/v/pyvex.svg)](https://pypi.python.org/pypi/pyvex/)\n[![Python Version](https://img.shields.io/pypi/pyversions/pyvex)](https://pypi.python.org/pypi/pyvex/)\n[![PyPI Statistics](https://img.shields.io/pypi/dm/pyvex.svg)](https://pypistats.org/packages/pyvex)\n[![License](https://img.shields.io/github/license/angr/pyvex.svg)](https://github.com/angr/pyvex/blob/master/LICENSE)\n\nPyVEX is Python bindings for the VEX IR.\n\n## Project Links\nProject repository: https://github.com/angr/pyvex\n\nDocumentation: https://api.angr.io/projects/pyvex/en/latest/\n\n## Installing PyVEX\n\nPyVEX can be pip-installed:\n\n```bash\npip install pyvex\n```\n\n## Using PyVEX\n\n```python\nimport pyvex\nimport archinfo\n\n# translate an AMD64 basic block (of nops) at 0x400400 into VEX\nirsb = pyvex.lift(b\"\\x90\\x90\\x90\\x90\\x90\", 0x400400, archinfo.ArchAMD64())\n\n# pretty-print the basic block\nirsb.pp()\n\n# this is the IR Expression of the jump target of the unconditional exit at the end of the basic block\nprint(irsb.next)\n\n# this is the type of the unconditional exit (i.e., a call, ret, syscall, etc)\nprint(irsb.jumpkind)\n\n# you can also pretty-print it\nirsb.next.pp()\n\n# iterate through each statement and print all the statements\nfor stmt in irsb.statements:\n    stmt.pp()\n\n# pretty-print the IR expression representing the data, and the *type* of that IR expression written by every store statement\nimport pyvex\nfor stmt in irsb.statements:\n    if isinstance(stmt, pyvex.IRStmt.Store):\n        print(\"Data:\", end=\"\")\n        stmt.data.pp()\n        print(\"\")\n\n        print(\"Type:\", end=\"\")\n        print(stmt.data.result_type)\n        print(\"\")\n\n# pretty-print the condition and jump target of every conditional exit from the basic block\nfor stmt in irsb.statements:\n    if isinstance(stmt, pyvex.IRStmt.Exit):\n        print(\"Condition:\", end=\"\")\n        stmt.guard.pp()\n        print(\"\")\n\n        print(\"Target:\", end=\"\")\n        stmt.dst.pp()\n        print(\"\")\n\n# these are the types of every temp in the IRSB\nprint(irsb.tyenv.types)\n\n# here is one way to get the type of temp 0\nprint(irsb.tyenv.types[0])\n```\n\nKeep in mind that this is a *syntactic* representation of a basic block. That is, it'll tell you what the block means, but you don't have any context to say, for example, what *actual* data is written by a store instruction.\n\n## VEX Intermediate Representation\n\nTo deal with widely diverse architectures, it is useful to carry out analyses on an intermediate representation.\nAn IR abstracts away several architecture differences when dealing with different architectures, allowing a single analysis to be run on all of them:\n\n- **Register names.** The quantity and names of registers differ between architectures, but modern CPU designs hold to a common theme: each CPU contains several general purpose registers, a register to hold the stack pointer, a set of registers to store condition flags, and so forth. The IR provides a consistent, abstracted interface to registers on different platforms. Specifically, VEX models the registers as a separate memory space, with integer offsets (i.e., AMD64's `rax` is stored starting at address 16 in this memory space).\n- **Memory access.** Different architectures access memory in different ways. For example, ARM can access memory in both little-endian and big-endian modes. The IR must abstract away these differences.\n- **Memory segmentation.** Some architectures, such as x86, support memory segmentation through the use of special segment registers. The IR understands such memory access mechanisms.\n- **Instruction side-effects.** Most instructions have side-effects. For example, most operations in Thumb mode on ARM update the condition flags, and stack push/pop instructions update the stack pointer. Tracking these side-effects in an *ad hoc* manner in the analysis would be crazy, so the IR makes these effects explicit.\n\nThere are lots of choices for an IR. We use VEX, since the uplifting of binary code into VEX is quite well supported.\nVEX is an architecture-agnostic, side-effects-free representation of a number of target machine languages.\nIt abstracts machine code into a representation designed to make program analysis easier.\nThis representation has five main classes of objects:\n\n- **Expressions.** IR Expressions represent a calculated or constant value. This includes memory loads, register reads, and results of arithmetic operations.\n- **Operations.** IR Operations describe a *modification* of IR Expressions. This includes integer arithmetic, floating-point arithmetic, bit operations, and so forth. An IR Operation applied to IR Expressions yields an IR Expression as a result.\n- **Temporary variables.** VEX uses temporary variables as internal registers: IR Expressions are stored in temporary variables between use. The content of a temporary variable can be retrieved using an IR Expression. These temporaries are numbered, starting at `t0`. These temporaries are strongly typed (i.e., \"64-bit integer\" or \"32-bit float\").\n- **Statements.** IR Statements model changes in the state of the target machine, such as the effect of memory stores and register writes. IR Statements use IR Expressions for values they may need. For example, a memory store *IR Statement* uses an *IR Expression* for the target address of the write, and another *IR Expression* for the content.\n- **Blocks.** An IR Block is a collection of IR Statements, representing an extended basic block (termed \"IR Super Block\" or \"IRSB\") in the target architecture. A block can have several exits. For conditional exits from the middle of a basic block, a special *Exit* IR Statement is used. An IR Expression is used to represent the target of the unconditional exit at the end of the block.\n\nVEX IR is actually quite well documented in the `libvex_ir.h` file (https://github.com/angr/vex/blob/dev/pub/libvex_ir.h) in the VEX repository. For the lazy, we'll detail some parts of VEX that you'll likely interact with fairly frequently. To begin with, here are some IR Expressions:\n\n| IR Expression | Evaluated Value | VEX Output Example |\n| ------------- | --------------- | ------- |\n| Constant | A constant value. | 0x4:I32 |\n| Read Temp | The value stored in a VEX temporary variable. | RdTmp(t10) |\n| Get Register | The value stored in a register. | GET:I32(16) |\n| Load Memory | The value stored at a memory address, with the address specified by another IR Expression. | LDle:I32 / LDbe:I64 |\n| Operation | A result of a specified IR Operation, applied to specified IR Expression arguments. | Add32 |\n| If-Then-Else | If a given IR Expression evaluates to 0, return one IR Expression. Otherwise, return another. | ITE |\n| Helper Function | VEX uses C helper functions for certain operations, such as computing the conditional flags registers of certain architectures. These functions return IR Expressions. | function\\_name() |\n\nThese expressions are then, in turn, used in IR Statements. Here are some common ones:\n\n| IR Statement | Meaning | VEX Output Example |\n| ------------ | ------- | ------------------ |\nWrite Temp | Set a VEX temporary variable to the value of the given IR Expression. | WrTmp(t1) = (IR Expression) |\nPut Register | Update a register with the value of the given IR Expression. | PUT(16) = (IR Expression) |\nStore Memory | Update a location in memory, given as an IR Expression, with a value, also given as an IR Expression. | STle(0x1000) = (IR Expression) |\nExit | A conditional exit from a basic block, with the jump target specified by an IR Expression. The condition is specified by an IR Expression. | if (condition) goto (Boring) 0x4000A00:I32 |\n\nAn example of an IR translation, on ARM, is produced below. In the example, the subtraction operation is translated into a single IR block comprising 5 IR Statements, each of which contains at least one IR Expression (although, in real life, an IR block would typically consist of more than one instruction). Register names are translated into numerical indices given to the *GET* Expression and *PUT* Statement.\nThe astute reader will observe that the actual subtraction is modeled by the first 4 IR Statements of the block, and the incrementing of the program counter to point to the next instruction (which, in this case, is located at `0x59FC8`) is modeled by the last statement.\n\nThe following ARM instruction:\n\n    subs R2, R2, #8\n\nBecomes this VEX IR:\n\n    t0 = GET:I32(16)\n    t1 = 0x8:I32\n    t3 = Sub32(t0,t1)\n    PUT(16) = t3\n    PUT(68) = 0x59FC8:I32\n\nCool stuff!\n\n## Citing PyVEX\n\nIf you use PyVEX in an academic work, please cite the paper for which it was developed:\n\n```bibtex\n@article{shoshitaishvili2015firmalice,\n  title={Firmalice - Automatic Detection of Authentication Bypass Vulnerabilities in Binary Firmware},\n  author={Shoshitaishvili, Yan and Wang, Ruoyu and Hauser, Christophe and Kruegel, Christopher and Vigna, Giovanni},\n  booktitle={NDSS},\n  year={2015}\n}\n```\n"
  },
  {
    "path": "docs/Makefile",
    "content": "# Minimal makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line, and also\n# from the environment for the first two.\nSPHINXOPTS    ?=\nSPHINXBUILD   ?= sphinx-build\nSOURCEDIR     = .\nBUILDDIR      = _build\n\n# Put it first so that \"make\" without argument is like \"make help\".\nhelp:\n\t@$(SPHINXBUILD) -M help \"$(SOURCEDIR)\" \"$(BUILDDIR)\" $(SPHINXOPTS) $(O)\n\n.PHONY: help Makefile\n\n# Catch-all target: route all unknown targets to Sphinx using the new\n# \"make mode\" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).\n%: Makefile\n\t@$(SPHINXBUILD) -M $@ \"$(SOURCEDIR)\" \"$(BUILDDIR)\" $(SPHINXOPTS) $(O)\n"
  },
  {
    "path": "docs/api.rst",
    "content": ":mod:`pyvex` --- Binary Translator\n==================================\n\n.. automodule:: pyvex\n.. automodule:: pyvex.native\n\n\nTranslation Interface\n---------------------\n\n.. automodule:: pyvex.block\n\n\nIR Components\n-------------\n\n.. automodule:: pyvex.stmt\n.. automodule:: pyvex.expr\n.. automodule:: pyvex.const\n.. automodule:: pyvex.enums\n\nLifting System\n--------------\n\n.. automodule:: pyvex.data_ref\n.. automodule:: pyvex.lifting\n.. automodule:: pyvex.lifting.lift_function\n.. automodule:: pyvex.lifting.libvex\n.. automodule:: pyvex.lifting.lifter\n.. automodule:: pyvex.lifting.post_processor\n.. automodule:: pyvex.lifting.util.irsb_postprocess\n.. automodule:: pyvex.lifting.util\n.. automodule:: pyvex.lifting.util.syntax_wrapper\n.. automodule:: pyvex.lifting.util.vex_helper\n.. automodule:: pyvex.lifting.util.lifter_helper\n.. automodule:: pyvex.lifting.util.instr_helper\n\nBuiltin IR Processors\n---------------------\n\n.. automodule:: pyvex.lifting.zerodivision\n\nErrors\n------\n\n.. automodule:: pyvex.errors\n\nUtilities\n---------\n\n.. automodule:: pyvex.utils\n"
  },
  {
    "path": "docs/conf.py",
    "content": "# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\nimport datetime\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nproject = \"pyvex\"\nproject_copyright = f\"{datetime.datetime.now().year}, The angr Project contributors\"\nauthor = \"The angr Project\"\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\nextensions = [\n    \"sphinx.ext.autodoc\",\n    \"sphinx.ext.autosummary\",\n    \"sphinx.ext.coverage\",\n    \"sphinx.ext.napoleon\",\n    \"sphinx.ext.todo\",\n    \"sphinx.ext.viewcode\",\n    \"sphinx_autodoc_typehints\",\n    \"myst_parser\",\n]\n\ntemplates_path = [\"_templates\"]\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# -- Options for autodoc -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#configuration\nautoclass_content = \"class\"\nautodoc_default_options = {\n    \"members\": True,\n    \"member-order\": \"bysource\",\n    \"show-inheritance\": True,\n    \"special-members\": \"__init__\",\n    \"undoc-members\": True,\n}\nautodoc_inherit_docstrings = True\nautodoc_typehints = \"both\"\n\n# -- Options for coverage ----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/extensions/coverage.html\ncoverage_write_headline = False\n\n\n# -- Options for HTML output -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\nhtml_theme = \"furo\"\nhtml_static_path = [\"_static\"]\n"
  },
  {
    "path": "docs/index.rst",
    "content": "Welcome to pyVEX's documentation!\n=================================\n\n\n.. toctree::\n   :maxdepth: 2\n   :caption: Contents:\n\n   Quickstart <quickstart>\n   API <api>\n\n\n\nIndices and tables\n==================\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n"
  },
  {
    "path": "docs/make.bat",
    "content": "@ECHO OFF\n\npushd %~dp0\n\nREM Command file for Sphinx documentation\n\nif \"%SPHINXBUILD%\" == \"\" (\n\tset SPHINXBUILD=sphinx-build\n)\nset SOURCEDIR=.\nset BUILDDIR=_build\n\n%SPHINXBUILD% >NUL 2>NUL\nif errorlevel 9009 (\n\techo.\n\techo.The 'sphinx-build' command was not found. Make sure you have Sphinx\n\techo.installed, then set the SPHINXBUILD environment variable to point\n\techo.to the full path of the 'sphinx-build' executable. Alternatively you\n\techo.may add the Sphinx directory to PATH.\n\techo.\n\techo.If you don't have Sphinx installed, grab it from\n\techo.https://www.sphinx-doc.org/\n\texit /b 1\n)\n\nif \"%1\" == \"\" goto help\n\n%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%\ngoto end\n\n:help\n%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%\n\n:end\npopd\n"
  },
  {
    "path": "docs/quickstart.rst",
    "content": ".. include:: ../README.md\n   :parser: myst_parser.sphinx_\n"
  },
  {
    "path": "fuzzing/build.sh",
    "content": "#!/bin/bash -eu\n# Copyright 2023 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n################################################################################\n\n# Since pyvex requires a specific developer build of archinfo, install it from source\ncd \"$SRC\"/archinfo\npython3 -m pip install .\n\ncd \"$SRC\"/pyvex\npython3 -m pip install .[testing]\n\n# Generate a simple binary for the corpus\necho -ne \"start:\\n\\txor %edi, %edi\\nmov \\$60, %eax\\nsyscall\" > /tmp/corpus.s\nclang -Os -s /tmp/corpus.s -nostdlib -nostartfiles -m32 -o corpus\nzip -r \"$OUT\"/irsb_fuzzer_seed_corpus.zip corpus\n\n# Build fuzzers in $OUT\n# --collect-submodules=bitstring ensures all bitstring submodules are bundled by PyInstaller\nfor fuzzer in $(find $SRC -name '*_fuzzer.py'); do\n  compile_python_fuzzer \"$fuzzer\" \\\n    --add-binary=\"pyvex/lib/libpyvex.so:pyvex/lib\" \\\n    --collect-submodules=bitstring\ndone\n"
  },
  {
    "path": "fuzzing/enhanced_fdp.py",
    "content": "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n################################################################################\n\"\"\"\nDefines the EnhancedFuzzedDataProvider\n\"\"\"\n\nfrom atheris import FuzzedDataProvider\n\n\nclass EnhancedFuzzedDataProvider(FuzzedDataProvider):\n    \"\"\"\n    Extends the functionality of FuzzedDataProvider\n    \"\"\"\n\n    def _consume_random_count(self) -> int:\n        \"\"\"\n        :return: A count of bytes that is strictly in range 0<=n<=remaining_bytes\n        \"\"\"\n        return self.ConsumeIntInRange(0, self.remaining_bytes())\n\n    def ConsumeRandomBytes(self) -> bytes:\n        \"\"\"\n        Consume a 'random' count of the remaining bytes\n        :return: 0<=n<=remaining_bytes bytes\n        \"\"\"\n        return self.ConsumeBytes(self._consume_random_count())\n\n    def ConsumeRemainingBytes(self) -> bytes:\n        \"\"\"\n        :return: The remaining buffer\n        \"\"\"\n        return self.ConsumeBytes(self.remaining_bytes())\n\n    def ConsumeRandomString(self) -> str:\n        \"\"\"\n        Consume a 'random' length string, excluding surrogates\n        :return: The string\n        \"\"\"\n        return self.ConsumeUnicodeNoSurrogates(self._consume_random_count())\n\n    def ConsumeRemainingString(self) -> str:\n        \"\"\"\n        :return: The remaining buffer, as a string without surrogates\n        \"\"\"\n        return 
self.ConsumeUnicodeNoSurrogates(self.remaining_bytes())\n\n    def PickValueInEnum(self, enum):\n        return self.PickValueInList([e.value for e in enum])\n"
  },
  {
    "path": "fuzzing/irsb_fuzzer.py",
    "content": "#!/usr/bin/python3\n# Copyright 2023 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n################################################################################\nimport re\nimport sys\nfrom contextlib import contextmanager\nfrom enum import IntEnum\nfrom io import StringIO\n\nimport atheris\n\nwith atheris.instrument_imports(include=[\"pyvex\"]):\n    import pyvex\n\n# Additional imports\nfrom enhanced_fdp import EnhancedFuzzedDataProvider\n\nregister_error_msg = re.compile(\"Register .*? 
does not exist!\")\n\n\n@contextmanager\ndef nostdout():\n    saved_stdout = sys.stdout\n    saved_stderr = sys.stderr\n    sys.stdout = StringIO()\n    sys.stderr = StringIO()\n    yield\n    sys.stdout = saved_stdout\n    sys.stderr = saved_stderr\n\n\n# Save all available architectures off\navailable_archs = [\n    pyvex.ARCH_X86,\n    pyvex.ARCH_AMD64,\n    pyvex.ARCH_ARM_LE,\n    pyvex.ARCH_ARM_BE,\n    pyvex.ARCH_ARM64_LE,\n    pyvex.ARCH_ARM64_BE,\n    pyvex.ARCH_PPC32,\n    pyvex.ARCH_PPC64_BE,\n    pyvex.ARCH_PPC64_LE,\n    pyvex.ARCH_S390X,\n    pyvex.ARCH_MIPS32_BE,\n    pyvex.ARCH_MIPS32_LE,\n    pyvex.ARCH_MIPS64_BE,\n    pyvex.ARCH_MIPS64_LE,\n]\n\n\nclass SupportedOptLevels(IntEnum):\n    \"\"\"\n    Enumerates the supported optimization levels within pyvex, as derived from the documentation\n    \"\"\"\n\n    StrictUnopt = -1\n    Unopt = 0\n    Opt = 1\n    StrictOpt = 2\n\n\ndef consume_random_arch(fdp: atheris.FuzzedDataProvider) -> pyvex.arches.PyvexArch:\n    return fdp.PickValueInList(available_archs)\n\n\ndef TestOneInput(data: bytes):\n    fdp = EnhancedFuzzedDataProvider(data)\n\n    arch = consume_random_arch(fdp)\n\n    try:\n        with nostdout():\n            data = fdp.ConsumeRandomBytes()\n            max_bytes = fdp.ConsumeIntInRange(0, len(data))\n            irsb = pyvex.lift(\n                data,\n                fdp.ConsumeInt(arch.bits),\n                arch,\n                max_bytes=fdp.ConsumeIntInRange(0, len(data)),\n                max_inst=fdp.ConsumeInt(16),\n                bytes_offset=fdp.ConsumeIntInRange(0, max_bytes),\n                opt_level=fdp.PickValueInEnum(SupportedOptLevels),\n            )\n            irsb.pp()\n        return 0\n    except pyvex.PyVEXError:\n        return -1\n    except ValueError as e:\n        if re.match(register_error_msg, str(e)):\n            return -1\n        raise e\n    except OverflowError:\n        return -1\n\n\ndef main():\n    atheris.Setup(sys.argv, 
TestOneInput)\n    atheris.Fuzz()\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "make_ffi.py",
    "content": "import logging\nimport os\nimport platform\nimport re\nimport subprocess\nimport sys\n\nimport cffi\n\nlog = logging.getLogger(\"cffier\")\nlog.setLevel(logging.DEBUG)\n\n\ndef find_good_scan(questionable):\n    known_good = []\n\n    end_line = len(questionable)\n\n    while len(questionable):\n        ffi = cffi.FFI()\n        log.debug(\"scan - trying %d good and %d questionable\", len(known_good), len(questionable))\n\n        candidate = known_good + questionable[:end_line]\n        failed_line = -1\n\n        try:\n            ffi.cdef(\"\\n\".join(candidate))\n\n            known_good = candidate\n            questionable = questionable[end_line:]\n            end_line = len(questionable)\n        except AssertionError:\n            questionable = questionable[1:]\n            end_line = len(questionable)\n        except cffi.CDefError as e:\n            if \"<cdef source string>\" in str(e):\n                failed_line = int(str(e).split(\"\\n\")[-1].split(\":\")[1]) - 1\n            elif str(e).count(\":\") >= 2:\n                failed_line = int(str(e).split(\"\\n\")[1].split(\":\")[1])\n                failed_line_description = str(e).split(\"\\n\")[0]\n                idx1 = failed_line_description.index('\"')\n                idx2 = failed_line_description.rindex('\"')\n                failed_reason = failed_line_description[idx1 + 1 : idx2]\n\n                for i in range(failed_line, -1, -1):\n                    if failed_reason in candidate[i]:\n                        failed_line = i\n            elif \"unrecognized construct\" in str(e):\n                failed_line = int(str(e).split()[1][:-1]) - 1\n            elif \"end of input\" in str(e):\n                end_line -= 1\n            else:\n                raise Exception(\"Unknown error\")\n        except cffi.FFIError as e:\n            if str(e).count(\":\") >= 2:\n                failed_line = int(str(e).split(\"\\n\")[0].split(\":\")[1]) - 1\n            else:\n       
         raise Exception(\"Unknown error\")\n\n        if failed_line != -1:\n            end_line = failed_line - len(known_good)\n\n        if end_line == 0:\n            questionable = questionable[1:]\n            end_line = len(questionable)\n    return known_good\n\n\ndef doit(vex_path):\n    cpplist = [\"cl\", \"cpp\"]\n    cpp = os.getenv(\"CPP\")\n    if cpp:\n        cpplist.insert(0, cpp)\n    if platform.system() == \"Darwin\":\n        cpplist.insert(0, \"clang\")\n\n    errs = []\n    for cpp in cpplist:\n        cmd = [cpp, \"-I\" + vex_path, os.path.join(\"pyvex_c\", \"pyvex.h\")]\n        if cpp in (\"cl\", \"clang\", \"gcc\", \"cc\", \"clang++\", \"g++\"):\n            cmd.append(\"-E\")\n        try:\n            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n            header, stderr = p.communicate()\n            try:\n                header = header.decode(\"utf-8\")\n                stderr = stderr.decode(\"utf-8\")\n            except UnicodeDecodeError:\n                # They don't have to be unicode on Windows\n                pass\n\n            if not header.strip() or p.returncode != 0:\n                errs.append((\" \".join(cmd), p.returncode, stderr))\n                continue\n            else:\n                break\n        except OSError:\n            errs.append((\" \".join(cmd), -1, \"does not exist\"))\n            continue\n    else:\n        log.warning(\"failed commands:\\n\" + \"\\n\".join(\"{} ({}) -- {}\".format(*e) for e in errs))\n        raise Exception(\n            \"Couldn't process pyvex headers.\"\n            + 'Please set CPP environmental variable to local path of \"cpp\".'\n            + 'Note that \"cpp\" and \"g++\" are different.'\n        )\n    # header = vex_pp + pyvex_pp\n\n    linesep = \"\\r\\n\" if \"\\r\\n\" in header else \"\\n\"\n    ffi_text = linesep.join(\n        line\n        for line in header.split(linesep)\n        if \"#\" not in line and line.strip() != 
\"\" and \"jmp_buf\" not in line and not (\"=\" in line and \";\" in line)\n    )\n    ffi_text = re.sub(r\"\\{\\s*\\} NoOp;\", \"{ int DONOTUSE; } NoOp;\", ffi_text)\n    ffi_text = re.sub(r\"__attribute__\\s*\\(.*\\)\", \"\", ffi_text)\n    ffi_text = re.sub(r\"__declspec\\s*\\([^\\)]*\\)\", \"\", ffi_text)\n    ffi_text = ffi_text.replace(\"__const\", \"const\")\n    ffi_text = ffi_text.replace(\"__inline\", \"\")\n    ffi_text = ffi_text.replace(\"__w64\", \"\")\n    ffi_text = ffi_text.replace(\"__cdecl\", \"\")\n    ffi_text = ffi_text.replace(\"__int64\", \"long\")\n    ffi_lines = ffi_text.split(linesep)\n\n    good = find_good_scan(ffi_lines)\n    good += [\"extern VexControl vex_control;\"]\n\n    with open(\"pyvex/vex_ffi.py\", \"w\") as fp:\n        fp.write('ffi_str = \"\"\"' + \"\\n\".join(good) + '\"\"\"\\n')\n        fp.write(\"guest_offsets = \" + repr(get_guest_offsets(vex_path)) + \"\\n\")\n\n\ndef get_guest_offsets(vex_path):\n    fname = os.path.join(vex_path, \"libvex_guest_offsets.h\")\n    out = {}\n    with open(fname) as fp:\n        for line in fp:\n            if line.startswith(\"#define\"):\n                _, names, val = line.split()\n                val = int(val, 0)\n                assert names.startswith(\"OFFSET_\")\n                _, arch, reg = names.split(\"_\", 2)\n                out[(arch, reg.lower())] = val\n    return out\n\n\nif __name__ == \"__main__\":\n    logging.basicConfig(level=logging.DEBUG)\n    doit(sys.argv[1])\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[build-system]\nrequires = [\"scikit-build-core >= 0.11.4, < 0.12.0\", \"cffi >= 1.0.3;implementation_name == 'cpython'\"]\nbuild-backend = \"scikit_build_core.build\"\n\n[project]\nname = \"pyvex\"\ndescription = \"A Python interface to libVEX and VEX IR\"\nlicense = \"BSD-2-Clause AND GPL-2.0-only\"\nlicense-files = [\n  \"LICENSE\",\n  \"pyvex_c/LICENSE\",\n  \"vex/LICENSE.README\",\n  \"vex/LICENSE.GPL\",\n]\nclassifiers = [\n    \"Programming Language :: Python :: 3\",\n    \"Programming Language :: Python :: 3 :: Only\",\n    \"Programming Language :: Python :: 3.10\",\n    \"Programming Language :: Python :: 3.11\",\n    \"Programming Language :: Python :: 3.12\",\n    \"Programming Language :: Python :: 3.13\",\n]\nrequires-python = \">=3.10\"\ndependencies = [\n    \"bitstring\",\n    \"cffi>=1.0.3;implementation_name == 'cpython'\",\n]\ndynamic = [\"version\"]\n\n[project.readme]\nfile = \"README.md\"\ncontent-type = \"text/markdown\"\n\n[project.urls]\nHomepage = \"https://api.angr.io/projects/pyvex/en/latest/\"\nRepository = \"https://github.com/angr/pyvex\"\n\n[project.optional-dependencies]\ndocs = [\n    \"furo\",\n    \"myst-parser\",\n    \"sphinx\",\n    \"sphinx-autodoc-typehints\",\n]\nfuzzing = [\n    \"atheris>=2.3.0\",\n]\ntesting = [\n    \"pytest\",\n    \"pytest-xdist\",\n]\n\n[dependency-groups]\ndev = [\n    \"pytest>=8.4.1\",\n]\n\n[tool.scikit-build]\nbuild-dir = \"scikit_build\"\n\n[tool.scikit-build.sdist]\ninclude = [\n  \"pyvex/py.typed\",\n  \"pyvex/include/*\",\n]\nexclude = [\n  \"tests*\"\n]\n\n[tool.scikit-build.metadata.version]\nprovider = \"scikit_build_core.metadata.regex\"\ninput = \"pyvex/__init__.py\"\n\n[tool.black]\nline-length = 120\ntarget-version = ['py310']\nforce-exclude = '''\n/(\n  vex\n)/\n'''\n\n[tool.ruff]\nline-length = 120\n\n[tool.ruff.lint]\nselect = [\n  \"E\",\n  \"F\",\n  \"I\",\n  \"TID\",\n]\n"
  },
  {
    "path": "pyvex/__init__.py",
    "content": "\"\"\"\nPyVEX provides an interface that translates binary code into the VEX intermediate representation (IR).\nFor an introduction to VEX, take a look here: https://docs.angr.io/advanced-topics/ir\n\"\"\"\n\n__version__ = \"9.2.212.dev0\"\n\nfrom . import const, expr, stmt\nfrom .arches import (\n    ARCH_AMD64,\n    ARCH_ARM64_BE,\n    ARCH_ARM64_LE,\n    ARCH_ARM_BE,\n    ARCH_ARM_BE_LE,\n    ARCH_ARM_LE,\n    ARCH_MIPS32_BE,\n    ARCH_MIPS32_LE,\n    ARCH_MIPS64_BE,\n    ARCH_MIPS64_LE,\n    ARCH_PPC32,\n    ARCH_PPC64_BE,\n    ARCH_PPC64_LE,\n    ARCH_RISCV64_LE,\n    ARCH_S390X,\n    ARCH_X86,\n)\nfrom .block import IRSB, IRTypeEnv\nfrom .const import get_type_size, get_type_spec_size, tag_to_const_class\nfrom .enums import (\n    IRCallee,\n    IRRegArray,\n    VEXObject,\n    default_vex_archinfo,\n    get_enum_from_int,\n    get_int_from_enum,\n    irop_enums_to_ints,\n    vex_endness_from_string,\n)\nfrom .errors import PyVEXError\nfrom .expr import get_op_retty\nfrom .lifting import lift, lifters\nfrom .native import ffi, pvc\n\n# aliases....\nIRStmt = stmt\nIRExpr = expr\nIRConst = const\n\n\n__all__ = [\n    \"const\",\n    \"expr\",\n    \"stmt\",\n    \"IRSB\",\n    \"IRTypeEnv\",\n    \"get_type_size\",\n    \"get_type_spec_size\",\n    \"irop_enums_to_ints\",\n    \"tag_to_const_class\",\n    \"IRCallee\",\n    \"IRRegArray\",\n    \"VEXObject\",\n    \"default_vex_archinfo\",\n    \"get_enum_from_int\",\n    \"get_int_from_enum\",\n    \"vex_endness_from_string\",\n    \"PyVEXError\",\n    \"get_op_retty\",\n    \"lift\",\n    \"lifters\",\n    \"ffi\",\n    \"pvc\",\n    \"IRStmt\",\n    \"IRExpr\",\n    \"IRConst\",\n    \"ARCH_X86\",\n    \"ARCH_AMD64\",\n    \"ARCH_ARM_BE\",\n    \"ARCH_ARM_BE_LE\",\n    \"ARCH_ARM_LE\",\n    \"ARCH_ARM64_LE\",\n    \"ARCH_ARM64_BE\",\n    \"ARCH_PPC32\",\n    \"ARCH_PPC64_BE\",\n    \"ARCH_PPC64_LE\",\n    \"ARCH_S390X\",\n    \"ARCH_MIPS32_BE\",\n    \"ARCH_MIPS32_LE\",\n    
\"ARCH_MIPS64_BE\",\n    \"ARCH_MIPS64_LE\",\n    \"ARCH_RISCV64_LE\",\n]\n"
  },
  {
    "path": "pyvex/_register_info.py",
    "content": "REGISTER_OFFSETS = {\n    (\"x86\", \"eax\"): 8,\n    (\"x86\", \"ax\"): 8,\n    (\"x86\", \"al\"): 8,\n    (\"x86\", \"ah\"): 9,\n    (\"x86\", \"ecx\"): 12,\n    (\"x86\", \"cx\"): 12,\n    (\"x86\", \"cl\"): 12,\n    (\"x86\", \"ch\"): 13,\n    (\"x86\", \"edx\"): 16,\n    (\"x86\", \"dx\"): 16,\n    (\"x86\", \"dl\"): 16,\n    (\"x86\", \"dh\"): 17,\n    (\"x86\", \"ebx\"): 20,\n    (\"x86\", \"bx\"): 20,\n    (\"x86\", \"bl\"): 20,\n    (\"x86\", \"bh\"): 21,\n    (\"x86\", \"esp\"): 24,\n    (\"x86\", \"sp\"): 24,\n    (\"x86\", \"ebp\"): 28,\n    (\"x86\", \"bp\"): 28,\n    (\"x86\", \"esi\"): 32,\n    (\"x86\", \"si\"): 32,\n    (\"x86\", \"sil\"): 32,\n    (\"x86\", \"sih\"): 33,\n    (\"x86\", \"edi\"): 36,\n    (\"x86\", \"di\"): 36,\n    (\"x86\", \"dil\"): 36,\n    (\"x86\", \"dih\"): 37,\n    (\"x86\", \"cc_op\"): 40,\n    (\"x86\", \"cc_dep1\"): 44,\n    (\"x86\", \"cc_dep2\"): 48,\n    (\"x86\", \"cc_ndep\"): 52,\n    (\"x86\", \"d\"): 56,\n    (\"x86\", \"dflag\"): 56,\n    (\"x86\", \"id\"): 60,\n    (\"x86\", \"idflag\"): 60,\n    (\"x86\", \"ac\"): 64,\n    (\"x86\", \"acflag\"): 64,\n    (\"x86\", \"eip\"): 68,\n    (\"x86\", \"ip\"): 68,\n    (\"x86\", \"pc\"): 68,\n    (\"x86\", \"fpreg\"): 72,\n    (\"x86\", \"fpu_regs\"): 72,\n    (\"x86\", \"mm0\"): 72,\n    (\"x86\", \"mm1\"): 80,\n    (\"x86\", \"mm2\"): 88,\n    (\"x86\", \"mm3\"): 96,\n    (\"x86\", \"mm4\"): 104,\n    (\"x86\", \"mm5\"): 112,\n    (\"x86\", \"mm6\"): 120,\n    (\"x86\", \"mm7\"): 128,\n    (\"x86\", \"fptag\"): 136,\n    (\"x86\", \"fpu_tags\"): 136,\n    (\"x86\", \"fpround\"): 144,\n    (\"x86\", \"fc3210\"): 148,\n    (\"x86\", \"ftop\"): 152,\n    (\"x86\", \"sseround\"): 156,\n    (\"x86\", \"xmm0\"): 160,\n    (\"x86\", \"xmm1\"): 176,\n    (\"x86\", \"xmm2\"): 192,\n    (\"x86\", \"xmm3\"): 208,\n    (\"x86\", \"xmm4\"): 224,\n    (\"x86\", \"xmm5\"): 240,\n    (\"x86\", \"xmm6\"): 256,\n    (\"x86\", \"xmm7\"): 272,\n    (\"x86\", \"cs\"): 
288,\n    (\"x86\", \"ds\"): 290,\n    (\"x86\", \"es\"): 292,\n    (\"x86\", \"fs\"): 294,\n    (\"x86\", \"gs\"): 296,\n    (\"x86\", \"ss\"): 298,\n    (\"x86\", \"ldt\"): 304,\n    (\"x86\", \"gdt\"): 312,\n    (\"x86\", \"emnote\"): 320,\n    (\"x86\", \"cmstart\"): 324,\n    (\"x86\", \"cmlen\"): 328,\n    (\"x86\", \"nraddr\"): 332,\n    (\"x86\", \"sc_class\"): 336,\n    (\"x86\", \"ip_at_syscall\"): 340,\n    (\"amd64\", \"rax\"): 16,\n    (\"amd64\", \"eax\"): 16,\n    (\"amd64\", \"ax\"): 16,\n    (\"amd64\", \"al\"): 16,\n    (\"amd64\", \"ah\"): 17,\n    (\"amd64\", \"rcx\"): 24,\n    (\"amd64\", \"ecx\"): 24,\n    (\"amd64\", \"cx\"): 24,\n    (\"amd64\", \"cl\"): 24,\n    (\"amd64\", \"ch\"): 25,\n    (\"amd64\", \"rdx\"): 32,\n    (\"amd64\", \"edx\"): 32,\n    (\"amd64\", \"dx\"): 32,\n    (\"amd64\", \"dl\"): 32,\n    (\"amd64\", \"dh\"): 33,\n    (\"amd64\", \"rbx\"): 40,\n    (\"amd64\", \"ebx\"): 40,\n    (\"amd64\", \"bx\"): 40,\n    (\"amd64\", \"bl\"): 40,\n    (\"amd64\", \"bh\"): 41,\n    (\"amd64\", \"rsp\"): 48,\n    (\"amd64\", \"sp\"): 48,\n    (\"amd64\", \"esp\"): 48,\n    (\"amd64\", \"rbp\"): 56,\n    (\"amd64\", \"bp\"): 56,\n    (\"amd64\", \"ebp\"): 56,\n    (\"amd64\", \"_bp\"): 56,\n    (\"amd64\", \"bpl\"): 56,\n    (\"amd64\", \"bph\"): 57,\n    (\"amd64\", \"rsi\"): 64,\n    (\"amd64\", \"esi\"): 64,\n    (\"amd64\", \"si\"): 64,\n    (\"amd64\", \"sil\"): 64,\n    (\"amd64\", \"sih\"): 65,\n    (\"amd64\", \"rdi\"): 72,\n    (\"amd64\", \"edi\"): 72,\n    (\"amd64\", \"di\"): 72,\n    (\"amd64\", \"dil\"): 72,\n    (\"amd64\", \"dih\"): 73,\n    (\"amd64\", \"r8\"): 80,\n    (\"amd64\", \"r8d\"): 80,\n    (\"amd64\", \"r8w\"): 80,\n    (\"amd64\", \"r8b\"): 80,\n    (\"amd64\", \"r9\"): 88,\n    (\"amd64\", \"r9d\"): 88,\n    (\"amd64\", \"r9w\"): 88,\n    (\"amd64\", \"r9b\"): 88,\n    (\"amd64\", \"r10\"): 96,\n    (\"amd64\", \"r10d\"): 96,\n    (\"amd64\", \"r10w\"): 96,\n    (\"amd64\", \"r10b\"): 96,\n    (\"amd64\", 
\"r11\"): 104,\n    (\"amd64\", \"r11d\"): 104,\n    (\"amd64\", \"r11w\"): 104,\n    (\"amd64\", \"r11b\"): 104,\n    (\"amd64\", \"r12\"): 112,\n    (\"amd64\", \"r12d\"): 112,\n    (\"amd64\", \"r12w\"): 112,\n    (\"amd64\", \"r12b\"): 112,\n    (\"amd64\", \"r13\"): 120,\n    (\"amd64\", \"r13d\"): 120,\n    (\"amd64\", \"r13w\"): 120,\n    (\"amd64\", \"r13b\"): 120,\n    (\"amd64\", \"r14\"): 128,\n    (\"amd64\", \"r14d\"): 128,\n    (\"amd64\", \"r14w\"): 128,\n    (\"amd64\", \"r14b\"): 128,\n    (\"amd64\", \"r15\"): 136,\n    (\"amd64\", \"r15d\"): 136,\n    (\"amd64\", \"r15w\"): 136,\n    (\"amd64\", \"r15b\"): 136,\n    (\"amd64\", \"cc_op\"): 144,\n    (\"amd64\", \"cc_dep1\"): 152,\n    (\"amd64\", \"cc_dep2\"): 160,\n    (\"amd64\", \"cc_ndep\"): 168,\n    (\"amd64\", \"d\"): 176,\n    (\"amd64\", \"dflag\"): 176,\n    (\"amd64\", \"rip\"): 184,\n    (\"amd64\", \"ip\"): 184,\n    (\"amd64\", \"pc\"): 184,\n    (\"amd64\", \"ac\"): 192,\n    (\"amd64\", \"acflag\"): 192,\n    (\"amd64\", \"id\"): 200,\n    (\"amd64\", \"idflag\"): 200,\n    (\"amd64\", \"fs\"): 208,\n    (\"amd64\", \"fs_const\"): 208,\n    (\"amd64\", \"sseround\"): 216,\n    (\"amd64\", \"cr0\"): 768,\n    (\"amd64\", \"cr2\"): 784,\n    (\"amd64\", \"cr3\"): 792,\n    (\"amd64\", \"cr4\"): 800,\n    (\"amd64\", \"cr8\"): 832,\n    (\"amd64\", \"ymm0\"): 224,\n    (\"amd64\", \"xmm0\"): 224,\n    (\"amd64\", \"xmm0lq\"): 224,\n    (\"amd64\", \"xmm0hq\"): 232,\n    (\"amd64\", \"ymm0hx\"): 240,\n    (\"amd64\", \"ymm1\"): 256,\n    (\"amd64\", \"xmm1\"): 256,\n    (\"amd64\", \"xmm1lq\"): 256,\n    (\"amd64\", \"xmm1hq\"): 264,\n    (\"amd64\", \"ymm1hx\"): 272,\n    (\"amd64\", \"ymm2\"): 288,\n    (\"amd64\", \"xmm2\"): 288,\n    (\"amd64\", \"xmm2lq\"): 288,\n    (\"amd64\", \"xmm2hq\"): 296,\n    (\"amd64\", \"ymm2hx\"): 304,\n    (\"amd64\", \"ymm3\"): 320,\n    (\"amd64\", \"xmm3\"): 320,\n    (\"amd64\", \"xmm3lq\"): 320,\n    (\"amd64\", \"xmm3hq\"): 328,\n    
(\"amd64\", \"ymm3hx\"): 336,\n    (\"amd64\", \"ymm4\"): 352,\n    (\"amd64\", \"xmm4\"): 352,\n    (\"amd64\", \"xmm4lq\"): 352,\n    (\"amd64\", \"xmm4hq\"): 360,\n    (\"amd64\", \"ymm4hx\"): 368,\n    (\"amd64\", \"ymm5\"): 384,\n    (\"amd64\", \"xmm5\"): 384,\n    (\"amd64\", \"xmm5lq\"): 384,\n    (\"amd64\", \"xmm5hq\"): 392,\n    (\"amd64\", \"ymm5hx\"): 400,\n    (\"amd64\", \"ymm6\"): 416,\n    (\"amd64\", \"xmm6\"): 416,\n    (\"amd64\", \"xmm6lq\"): 416,\n    (\"amd64\", \"xmm6hq\"): 424,\n    (\"amd64\", \"ymm6hx\"): 432,\n    (\"amd64\", \"ymm7\"): 448,\n    (\"amd64\", \"xmm7\"): 448,\n    (\"amd64\", \"xmm7lq\"): 448,\n    (\"amd64\", \"xmm7hq\"): 456,\n    (\"amd64\", \"ymm7hx\"): 464,\n    (\"amd64\", \"ymm8\"): 480,\n    (\"amd64\", \"xmm8\"): 480,\n    (\"amd64\", \"xmm8lq\"): 480,\n    (\"amd64\", \"xmm8hq\"): 488,\n    (\"amd64\", \"ymm8hx\"): 496,\n    (\"amd64\", \"ymm9\"): 512,\n    (\"amd64\", \"xmm9\"): 512,\n    (\"amd64\", \"xmm9lq\"): 512,\n    (\"amd64\", \"xmm9hq\"): 520,\n    (\"amd64\", \"ymm9hx\"): 528,\n    (\"amd64\", \"ymm10\"): 544,\n    (\"amd64\", \"xmm10\"): 544,\n    (\"amd64\", \"xmm10lq\"): 544,\n    (\"amd64\", \"xmm10hq\"): 552,\n    (\"amd64\", \"ymm10hx\"): 560,\n    (\"amd64\", \"ymm11\"): 576,\n    (\"amd64\", \"xmm11\"): 576,\n    (\"amd64\", \"xmm11lq\"): 576,\n    (\"amd64\", \"xmm11hq\"): 584,\n    (\"amd64\", \"ymm11hx\"): 592,\n    (\"amd64\", \"ymm12\"): 608,\n    (\"amd64\", \"xmm12\"): 608,\n    (\"amd64\", \"xmm12lq\"): 608,\n    (\"amd64\", \"xmm12hq\"): 616,\n    (\"amd64\", \"ymm12hx\"): 624,\n    (\"amd64\", \"ymm13\"): 640,\n    (\"amd64\", \"xmm13\"): 640,\n    (\"amd64\", \"xmm13lq\"): 640,\n    (\"amd64\", \"xmm13hq\"): 648,\n    (\"amd64\", \"ymm13hx\"): 656,\n    (\"amd64\", \"ymm14\"): 672,\n    (\"amd64\", \"xmm14\"): 672,\n    (\"amd64\", \"xmm14lq\"): 672,\n    (\"amd64\", \"xmm14hq\"): 680,\n    (\"amd64\", \"ymm14hx\"): 688,\n    (\"amd64\", \"ymm15\"): 704,\n    (\"amd64\", \"xmm15\"): 
704,\n    (\"amd64\", \"xmm15lq\"): 704,\n    (\"amd64\", \"xmm15hq\"): 712,\n    (\"amd64\", \"ymm15hx\"): 720,\n    (\"amd64\", \"ftop\"): 896,\n    (\"amd64\", \"fpreg\"): 904,\n    (\"amd64\", \"fpu_regs\"): 904,\n    (\"amd64\", \"mm0\"): 904,\n    (\"amd64\", \"mm1\"): 912,\n    (\"amd64\", \"mm2\"): 920,\n    (\"amd64\", \"mm3\"): 928,\n    (\"amd64\", \"mm4\"): 936,\n    (\"amd64\", \"mm5\"): 944,\n    (\"amd64\", \"mm6\"): 952,\n    (\"amd64\", \"mm7\"): 960,\n    (\"amd64\", \"fptag\"): 968,\n    (\"amd64\", \"fpu_tags\"): 968,\n    (\"amd64\", \"fpround\"): 976,\n    (\"amd64\", \"fc3210\"): 984,\n    (\"amd64\", \"emnote\"): 992,\n    (\"amd64\", \"cmstart\"): 1000,\n    (\"amd64\", \"cmlen\"): 1008,\n    (\"amd64\", \"nraddr\"): 1016,\n    (\"amd64\", \"gs\"): 1032,\n    (\"amd64\", \"gs_const\"): 1032,\n    (\"amd64\", \"ip_at_syscall\"): 1040,\n    (\"amd64\", \"cs_seg\"): 1048,\n    (\"amd64\", \"ds_seg\"): 1050,\n    (\"amd64\", \"es_seg\"): 1052,\n    (\"amd64\", \"fs_seg\"): 1054,\n    (\"amd64\", \"gs_seg\"): 1056,\n    (\"amd64\", \"ss_seg\"): 1058,\n    (\"arm\", \"r0\"): 8,\n    (\"arm\", \"a1\"): 8,\n    (\"arm\", \"r1\"): 12,\n    (\"arm\", \"a2\"): 12,\n    (\"arm\", \"r2\"): 16,\n    (\"arm\", \"a3\"): 16,\n    (\"arm\", \"r3\"): 20,\n    (\"arm\", \"a4\"): 20,\n    (\"arm\", \"r4\"): 24,\n    (\"arm\", \"v1\"): 24,\n    (\"arm\", \"r5\"): 28,\n    (\"arm\", \"v2\"): 28,\n    (\"arm\", \"r6\"): 32,\n    (\"arm\", \"v3\"): 32,\n    (\"arm\", \"r7\"): 36,\n    (\"arm\", \"v4\"): 36,\n    (\"arm\", \"r8\"): 40,\n    (\"arm\", \"v5\"): 40,\n    (\"arm\", \"r9\"): 44,\n    (\"arm\", \"v6\"): 44,\n    (\"arm\", \"sb\"): 44,\n    (\"arm\", \"r10\"): 48,\n    (\"arm\", \"v7\"): 48,\n    (\"arm\", \"sl\"): 48,\n    (\"arm\", \"r11\"): 52,\n    (\"arm\", \"v8\"): 52,\n    (\"arm\", \"fp\"): 52,\n    (\"arm\", \"bp\"): 52,\n    (\"arm\", \"r12\"): 56,\n    (\"arm\", \"sp\"): 60,\n    (\"arm\", \"r13\"): 60,\n    (\"arm\", \"lr\"): 64,\n    (\"arm\", 
\"r14\"): 64,\n    (\"arm\", \"pc\"): 68,\n    (\"arm\", \"r15\"): 68,\n    (\"arm\", \"ip\"): 68,\n    (\"arm\", \"cc_op\"): 72,\n    (\"arm\", \"cc_dep1\"): 76,\n    (\"arm\", \"cc_dep2\"): 80,\n    (\"arm\", \"cc_ndep\"): 84,\n    (\"arm\", \"qflag32\"): 88,\n    (\"arm\", \"geflag0\"): 92,\n    (\"arm\", \"geflag1\"): 96,\n    (\"arm\", \"geflag2\"): 100,\n    (\"arm\", \"geflag3\"): 104,\n    (\"arm\", \"emnote\"): 108,\n    (\"arm\", \"cmstart\"): 112,\n    (\"arm\", \"cmlen\"): 116,\n    (\"arm\", \"nraddr\"): 120,\n    (\"arm\", \"ip_at_syscall\"): 124,\n    (\"arm\", \"d0\"): 128,\n    (\"arm\", \"s0\"): 128,\n    (\"arm\", \"s1\"): 132,\n    (\"arm\", \"d1\"): 136,\n    (\"arm\", \"s2\"): 136,\n    (\"arm\", \"s3\"): 140,\n    (\"arm\", \"d2\"): 144,\n    (\"arm\", \"s4\"): 144,\n    (\"arm\", \"s5\"): 148,\n    (\"arm\", \"d3\"): 152,\n    (\"arm\", \"s6\"): 152,\n    (\"arm\", \"s7\"): 156,\n    (\"arm\", \"d4\"): 160,\n    (\"arm\", \"s8\"): 160,\n    (\"arm\", \"s9\"): 164,\n    (\"arm\", \"d5\"): 168,\n    (\"arm\", \"s10\"): 168,\n    (\"arm\", \"s11\"): 172,\n    (\"arm\", \"d6\"): 176,\n    (\"arm\", \"s12\"): 176,\n    (\"arm\", \"s13\"): 180,\n    (\"arm\", \"d7\"): 184,\n    (\"arm\", \"s14\"): 184,\n    (\"arm\", \"s15\"): 188,\n    (\"arm\", \"d8\"): 192,\n    (\"arm\", \"s16\"): 192,\n    (\"arm\", \"s17\"): 196,\n    (\"arm\", \"d9\"): 200,\n    (\"arm\", \"s18\"): 200,\n    (\"arm\", \"s19\"): 204,\n    (\"arm\", \"d10\"): 208,\n    (\"arm\", \"s20\"): 208,\n    (\"arm\", \"s21\"): 212,\n    (\"arm\", \"d11\"): 216,\n    (\"arm\", \"s22\"): 216,\n    (\"arm\", \"s23\"): 220,\n    (\"arm\", \"d12\"): 224,\n    (\"arm\", \"s24\"): 224,\n    (\"arm\", \"s25\"): 228,\n    (\"arm\", \"d13\"): 232,\n    (\"arm\", \"s26\"): 232,\n    (\"arm\", \"s27\"): 236,\n    (\"arm\", \"d14\"): 240,\n    (\"arm\", \"s28\"): 240,\n    (\"arm\", \"s29\"): 244,\n    (\"arm\", \"d15\"): 248,\n    (\"arm\", \"s30\"): 248,\n    (\"arm\", \"s31\"): 252,\n    
(\"arm\", \"d16\"): 256,\n    (\"arm\", \"d17\"): 264,\n    (\"arm\", \"d18\"): 272,\n    (\"arm\", \"d19\"): 280,\n    (\"arm\", \"d20\"): 288,\n    (\"arm\", \"d21\"): 296,\n    (\"arm\", \"d22\"): 304,\n    (\"arm\", \"d23\"): 312,\n    (\"arm\", \"d24\"): 320,\n    (\"arm\", \"d25\"): 328,\n    (\"arm\", \"d26\"): 336,\n    (\"arm\", \"d27\"): 344,\n    (\"arm\", \"d28\"): 352,\n    (\"arm\", \"d29\"): 360,\n    (\"arm\", \"d30\"): 368,\n    (\"arm\", \"d31\"): 376,\n    (\"arm\", \"fpscr\"): 384,\n    (\"arm\", \"tpidruro\"): 388,\n    (\"arm\", \"itstate\"): 392,\n    (\"arm64\", \"x0\"): 16,\n    (\"arm64\", \"r0\"): 16,\n    (\"arm64\", \"w0\"): 16,\n    (\"arm64\", \"x1\"): 24,\n    (\"arm64\", \"r1\"): 24,\n    (\"arm64\", \"w1\"): 24,\n    (\"arm64\", \"x2\"): 32,\n    (\"arm64\", \"r2\"): 32,\n    (\"arm64\", \"w2\"): 32,\n    (\"arm64\", \"x3\"): 40,\n    (\"arm64\", \"r3\"): 40,\n    (\"arm64\", \"w3\"): 40,\n    (\"arm64\", \"x4\"): 48,\n    (\"arm64\", \"r4\"): 48,\n    (\"arm64\", \"w4\"): 48,\n    (\"arm64\", \"x5\"): 56,\n    (\"arm64\", \"r5\"): 56,\n    (\"arm64\", \"w5\"): 56,\n    (\"arm64\", \"x6\"): 64,\n    (\"arm64\", \"r6\"): 64,\n    (\"arm64\", \"w6\"): 64,\n    (\"arm64\", \"x7\"): 72,\n    (\"arm64\", \"r7\"): 72,\n    (\"arm64\", \"w7\"): 72,\n    (\"arm64\", \"x8\"): 80,\n    (\"arm64\", \"r8\"): 80,\n    (\"arm64\", \"w8\"): 80,\n    (\"arm64\", \"x9\"): 88,\n    (\"arm64\", \"r9\"): 88,\n    (\"arm64\", \"w9\"): 88,\n    (\"arm64\", \"x10\"): 96,\n    (\"arm64\", \"r10\"): 96,\n    (\"arm64\", \"w10\"): 96,\n    (\"arm64\", \"x11\"): 104,\n    (\"arm64\", \"r11\"): 104,\n    (\"arm64\", \"w11\"): 104,\n    (\"arm64\", \"x12\"): 112,\n    (\"arm64\", \"r12\"): 112,\n    (\"arm64\", \"w12\"): 112,\n    (\"arm64\", \"x13\"): 120,\n    (\"arm64\", \"r13\"): 120,\n    (\"arm64\", \"w13\"): 120,\n    (\"arm64\", \"x14\"): 128,\n    (\"arm64\", \"r14\"): 128,\n    (\"arm64\", \"w14\"): 128,\n    (\"arm64\", \"x15\"): 136,\n    
(\"arm64\", \"r15\"): 136,\n    (\"arm64\", \"w15\"): 136,\n    (\"arm64\", \"x16\"): 144,\n    (\"arm64\", \"r16\"): 144,\n    (\"arm64\", \"ip0\"): 144,\n    (\"arm64\", \"w16\"): 144,\n    (\"arm64\", \"x17\"): 152,\n    (\"arm64\", \"r17\"): 152,\n    (\"arm64\", \"ip1\"): 152,\n    (\"arm64\", \"w17\"): 152,\n    (\"arm64\", \"x18\"): 160,\n    (\"arm64\", \"r18\"): 160,\n    (\"arm64\", \"w18\"): 160,\n    (\"arm64\", \"x19\"): 168,\n    (\"arm64\", \"r19\"): 168,\n    (\"arm64\", \"w19\"): 168,\n    (\"arm64\", \"x20\"): 176,\n    (\"arm64\", \"r20\"): 176,\n    (\"arm64\", \"w20\"): 176,\n    (\"arm64\", \"x21\"): 184,\n    (\"arm64\", \"r21\"): 184,\n    (\"arm64\", \"w21\"): 184,\n    (\"arm64\", \"x22\"): 192,\n    (\"arm64\", \"r22\"): 192,\n    (\"arm64\", \"w22\"): 192,\n    (\"arm64\", \"x23\"): 200,\n    (\"arm64\", \"r23\"): 200,\n    (\"arm64\", \"w23\"): 200,\n    (\"arm64\", \"x24\"): 208,\n    (\"arm64\", \"r24\"): 208,\n    (\"arm64\", \"w24\"): 208,\n    (\"arm64\", \"x25\"): 216,\n    (\"arm64\", \"r25\"): 216,\n    (\"arm64\", \"w25\"): 216,\n    (\"arm64\", \"x26\"): 224,\n    (\"arm64\", \"r26\"): 224,\n    (\"arm64\", \"w26\"): 224,\n    (\"arm64\", \"x27\"): 232,\n    (\"arm64\", \"r27\"): 232,\n    (\"arm64\", \"w27\"): 232,\n    (\"arm64\", \"x28\"): 240,\n    (\"arm64\", \"r28\"): 240,\n    (\"arm64\", \"w28\"): 240,\n    (\"arm64\", \"x29\"): 248,\n    (\"arm64\", \"r29\"): 248,\n    (\"arm64\", \"fp\"): 248,\n    (\"arm64\", \"bp\"): 248,\n    (\"arm64\", \"w29\"): 248,\n    (\"arm64\", \"x30\"): 256,\n    (\"arm64\", \"r30\"): 256,\n    (\"arm64\", \"lr\"): 256,\n    (\"arm64\", \"w30\"): 256,\n    (\"arm64\", \"xsp\"): 264,\n    (\"arm64\", \"sp\"): 264,\n    (\"arm64\", \"wsp\"): 264,\n    (\"arm64\", \"pc\"): 272,\n    (\"arm64\", \"ip\"): 272,\n    (\"arm64\", \"cc_op\"): 280,\n    (\"arm64\", \"cc_dep1\"): 288,\n    (\"arm64\", \"cc_dep2\"): 296,\n    (\"arm64\", \"cc_ndep\"): 304,\n    (\"arm64\", \"tpidr_el0\"): 312,\n    
(\"arm64\", \"q0\"): 320,\n    (\"arm64\", \"v0\"): 320,\n    (\"arm64\", \"d0\"): 320,\n    (\"arm64\", \"s0\"): 320,\n    (\"arm64\", \"h0\"): 320,\n    (\"arm64\", \"b0\"): 320,\n    (\"arm64\", \"q1\"): 336,\n    (\"arm64\", \"v1\"): 336,\n    (\"arm64\", \"d1\"): 336,\n    (\"arm64\", \"s1\"): 336,\n    (\"arm64\", \"h1\"): 336,\n    (\"arm64\", \"b1\"): 336,\n    (\"arm64\", \"q2\"): 352,\n    (\"arm64\", \"v2\"): 352,\n    (\"arm64\", \"d2\"): 352,\n    (\"arm64\", \"s2\"): 352,\n    (\"arm64\", \"h2\"): 352,\n    (\"arm64\", \"b2\"): 352,\n    (\"arm64\", \"q3\"): 368,\n    (\"arm64\", \"v3\"): 368,\n    (\"arm64\", \"d3\"): 368,\n    (\"arm64\", \"s3\"): 368,\n    (\"arm64\", \"h3\"): 368,\n    (\"arm64\", \"b3\"): 368,\n    (\"arm64\", \"q4\"): 384,\n    (\"arm64\", \"v4\"): 384,\n    (\"arm64\", \"d4\"): 384,\n    (\"arm64\", \"s4\"): 384,\n    (\"arm64\", \"h4\"): 384,\n    (\"arm64\", \"b4\"): 384,\n    (\"arm64\", \"q5\"): 400,\n    (\"arm64\", \"v5\"): 400,\n    (\"arm64\", \"d5\"): 400,\n    (\"arm64\", \"s5\"): 400,\n    (\"arm64\", \"h5\"): 400,\n    (\"arm64\", \"b5\"): 400,\n    (\"arm64\", \"q6\"): 416,\n    (\"arm64\", \"v6\"): 416,\n    (\"arm64\", \"d6\"): 416,\n    (\"arm64\", \"s6\"): 416,\n    (\"arm64\", \"h6\"): 416,\n    (\"arm64\", \"b6\"): 416,\n    (\"arm64\", \"q7\"): 432,\n    (\"arm64\", \"v7\"): 432,\n    (\"arm64\", \"d7\"): 432,\n    (\"arm64\", \"s7\"): 432,\n    (\"arm64\", \"h7\"): 432,\n    (\"arm64\", \"b7\"): 432,\n    (\"arm64\", \"q8\"): 448,\n    (\"arm64\", \"v8\"): 448,\n    (\"arm64\", \"d8\"): 448,\n    (\"arm64\", \"s8\"): 448,\n    (\"arm64\", \"h8\"): 448,\n    (\"arm64\", \"b8\"): 448,\n    (\"arm64\", \"q9\"): 464,\n    (\"arm64\", \"v9\"): 464,\n    (\"arm64\", \"d9\"): 464,\n    (\"arm64\", \"s9\"): 464,\n    (\"arm64\", \"h9\"): 464,\n    (\"arm64\", \"b9\"): 464,\n    (\"arm64\", \"q10\"): 480,\n    (\"arm64\", \"v10\"): 480,\n    (\"arm64\", \"d10\"): 480,\n    (\"arm64\", \"s10\"): 480,\n    (\"arm64\", 
\"h10\"): 480,\n    (\"arm64\", \"b10\"): 480,\n    (\"arm64\", \"q11\"): 496,\n    (\"arm64\", \"v11\"): 496,\n    (\"arm64\", \"d11\"): 496,\n    (\"arm64\", \"s11\"): 496,\n    (\"arm64\", \"h11\"): 496,\n    (\"arm64\", \"b11\"): 496,\n    (\"arm64\", \"q12\"): 512,\n    (\"arm64\", \"v12\"): 512,\n    (\"arm64\", \"d12\"): 512,\n    (\"arm64\", \"s12\"): 512,\n    (\"arm64\", \"h12\"): 512,\n    (\"arm64\", \"b12\"): 512,\n    (\"arm64\", \"q13\"): 528,\n    (\"arm64\", \"v13\"): 528,\n    (\"arm64\", \"d13\"): 528,\n    (\"arm64\", \"s13\"): 528,\n    (\"arm64\", \"h13\"): 528,\n    (\"arm64\", \"b13\"): 528,\n    (\"arm64\", \"q14\"): 544,\n    (\"arm64\", \"v14\"): 544,\n    (\"arm64\", \"d14\"): 544,\n    (\"arm64\", \"s14\"): 544,\n    (\"arm64\", \"h14\"): 544,\n    (\"arm64\", \"b14\"): 544,\n    (\"arm64\", \"q15\"): 560,\n    (\"arm64\", \"v15\"): 560,\n    (\"arm64\", \"d15\"): 560,\n    (\"arm64\", \"s15\"): 560,\n    (\"arm64\", \"h15\"): 560,\n    (\"arm64\", \"b15\"): 560,\n    (\"arm64\", \"q16\"): 576,\n    (\"arm64\", \"v16\"): 576,\n    (\"arm64\", \"d16\"): 576,\n    (\"arm64\", \"s16\"): 576,\n    (\"arm64\", \"h16\"): 576,\n    (\"arm64\", \"b16\"): 576,\n    (\"arm64\", \"q17\"): 592,\n    (\"arm64\", \"v17\"): 592,\n    (\"arm64\", \"d17\"): 592,\n    (\"arm64\", \"s17\"): 592,\n    (\"arm64\", \"h17\"): 592,\n    (\"arm64\", \"b17\"): 592,\n    (\"arm64\", \"q18\"): 608,\n    (\"arm64\", \"v18\"): 608,\n    (\"arm64\", \"d18\"): 608,\n    (\"arm64\", \"s18\"): 608,\n    (\"arm64\", \"h18\"): 608,\n    (\"arm64\", \"b18\"): 608,\n    (\"arm64\", \"q19\"): 624,\n    (\"arm64\", \"v19\"): 624,\n    (\"arm64\", \"d19\"): 624,\n    (\"arm64\", \"s19\"): 624,\n    (\"arm64\", \"h19\"): 624,\n    (\"arm64\", \"b19\"): 624,\n    (\"arm64\", \"q20\"): 640,\n    (\"arm64\", \"v20\"): 640,\n    (\"arm64\", \"d20\"): 640,\n    (\"arm64\", \"s20\"): 640,\n    (\"arm64\", \"h20\"): 640,\n    (\"arm64\", \"b20\"): 640,\n    (\"arm64\", \"q21\"): 
656,\n    (\"arm64\", \"v21\"): 656,\n    (\"arm64\", \"d21\"): 656,\n    (\"arm64\", \"s21\"): 656,\n    (\"arm64\", \"h21\"): 656,\n    (\"arm64\", \"b21\"): 656,\n    (\"arm64\", \"q22\"): 672,\n    (\"arm64\", \"v22\"): 672,\n    (\"arm64\", \"d22\"): 672,\n    (\"arm64\", \"s22\"): 672,\n    (\"arm64\", \"h22\"): 672,\n    (\"arm64\", \"b22\"): 672,\n    (\"arm64\", \"q23\"): 688,\n    (\"arm64\", \"v23\"): 688,\n    (\"arm64\", \"d23\"): 688,\n    (\"arm64\", \"s23\"): 688,\n    (\"arm64\", \"h23\"): 688,\n    (\"arm64\", \"b23\"): 688,\n    (\"arm64\", \"q24\"): 704,\n    (\"arm64\", \"v24\"): 704,\n    (\"arm64\", \"d24\"): 704,\n    (\"arm64\", \"s24\"): 704,\n    (\"arm64\", \"h24\"): 704,\n    (\"arm64\", \"b24\"): 704,\n    (\"arm64\", \"q25\"): 720,\n    (\"arm64\", \"v25\"): 720,\n    (\"arm64\", \"d25\"): 720,\n    (\"arm64\", \"s25\"): 720,\n    (\"arm64\", \"h25\"): 720,\n    (\"arm64\", \"b25\"): 720,\n    (\"arm64\", \"q26\"): 736,\n    (\"arm64\", \"v26\"): 736,\n    (\"arm64\", \"d26\"): 736,\n    (\"arm64\", \"s26\"): 736,\n    (\"arm64\", \"h26\"): 736,\n    (\"arm64\", \"b26\"): 736,\n    (\"arm64\", \"q27\"): 752,\n    (\"arm64\", \"v27\"): 752,\n    (\"arm64\", \"d27\"): 752,\n    (\"arm64\", \"s27\"): 752,\n    (\"arm64\", \"h27\"): 752,\n    (\"arm64\", \"b27\"): 752,\n    (\"arm64\", \"q28\"): 768,\n    (\"arm64\", \"v28\"): 768,\n    (\"arm64\", \"d28\"): 768,\n    (\"arm64\", \"s28\"): 768,\n    (\"arm64\", \"h28\"): 768,\n    (\"arm64\", \"b28\"): 768,\n    (\"arm64\", \"q29\"): 784,\n    (\"arm64\", \"v29\"): 784,\n    (\"arm64\", \"d29\"): 784,\n    (\"arm64\", \"s29\"): 784,\n    (\"arm64\", \"h29\"): 784,\n    (\"arm64\", \"b29\"): 784,\n    (\"arm64\", \"q30\"): 800,\n    (\"arm64\", \"v30\"): 800,\n    (\"arm64\", \"d30\"): 800,\n    (\"arm64\", \"s30\"): 800,\n    (\"arm64\", \"h30\"): 800,\n    (\"arm64\", \"b30\"): 800,\n    (\"arm64\", \"q31\"): 816,\n    (\"arm64\", \"v31\"): 816,\n    (\"arm64\", \"d31\"): 816,\n    
(\"arm64\", \"s31\"): 816,\n    (\"arm64\", \"h31\"): 816,\n    (\"arm64\", \"b31\"): 816,\n    (\"arm64\", \"qcflag\"): 832,\n    (\"arm64\", \"emnote\"): 848,\n    (\"arm64\", \"cmstart\"): 856,\n    (\"arm64\", \"cmlen\"): 864,\n    (\"arm64\", \"nraddr\"): 872,\n    (\"arm64\", \"ip_at_syscall\"): 880,\n    (\"arm64\", \"fpcr\"): 888,\n    (\"ppc32\", \"gpr0\"): 16,\n    (\"ppc32\", \"r0\"): 16,\n    (\"ppc32\", \"gpr1\"): 20,\n    (\"ppc32\", \"r1\"): 20,\n    (\"ppc32\", \"sp\"): 20,\n    (\"ppc32\", \"gpr2\"): 24,\n    (\"ppc32\", \"r2\"): 24,\n    (\"ppc32\", \"gpr3\"): 28,\n    (\"ppc32\", \"r3\"): 28,\n    (\"ppc32\", \"gpr4\"): 32,\n    (\"ppc32\", \"r4\"): 32,\n    (\"ppc32\", \"gpr5\"): 36,\n    (\"ppc32\", \"r5\"): 36,\n    (\"ppc32\", \"gpr6\"): 40,\n    (\"ppc32\", \"r6\"): 40,\n    (\"ppc32\", \"gpr7\"): 44,\n    (\"ppc32\", \"r7\"): 44,\n    (\"ppc32\", \"gpr8\"): 48,\n    (\"ppc32\", \"r8\"): 48,\n    (\"ppc32\", \"gpr9\"): 52,\n    (\"ppc32\", \"r9\"): 52,\n    (\"ppc32\", \"gpr10\"): 56,\n    (\"ppc32\", \"r10\"): 56,\n    (\"ppc32\", \"gpr11\"): 60,\n    (\"ppc32\", \"r11\"): 60,\n    (\"ppc32\", \"gpr12\"): 64,\n    (\"ppc32\", \"r12\"): 64,\n    (\"ppc32\", \"gpr13\"): 68,\n    (\"ppc32\", \"r13\"): 68,\n    (\"ppc32\", \"gpr14\"): 72,\n    (\"ppc32\", \"r14\"): 72,\n    (\"ppc32\", \"gpr15\"): 76,\n    (\"ppc32\", \"r15\"): 76,\n    (\"ppc32\", \"gpr16\"): 80,\n    (\"ppc32\", \"r16\"): 80,\n    (\"ppc32\", \"gpr17\"): 84,\n    (\"ppc32\", \"r17\"): 84,\n    (\"ppc32\", \"gpr18\"): 88,\n    (\"ppc32\", \"r18\"): 88,\n    (\"ppc32\", \"gpr19\"): 92,\n    (\"ppc32\", \"r19\"): 92,\n    (\"ppc32\", \"gpr20\"): 96,\n    (\"ppc32\", \"r20\"): 96,\n    (\"ppc32\", \"gpr21\"): 100,\n    (\"ppc32\", \"r21\"): 100,\n    (\"ppc32\", \"gpr22\"): 104,\n    (\"ppc32\", \"r22\"): 104,\n    (\"ppc32\", \"gpr23\"): 108,\n    (\"ppc32\", \"r23\"): 108,\n    (\"ppc32\", \"gpr24\"): 112,\n    (\"ppc32\", \"r24\"): 112,\n    (\"ppc32\", \"gpr25\"): 116,\n    
(\"ppc32\", \"r25\"): 116,\n    (\"ppc32\", \"gpr26\"): 120,\n    (\"ppc32\", \"r26\"): 120,\n    (\"ppc32\", \"gpr27\"): 124,\n    (\"ppc32\", \"r27\"): 124,\n    (\"ppc32\", \"gpr28\"): 128,\n    (\"ppc32\", \"r28\"): 128,\n    (\"ppc32\", \"gpr29\"): 132,\n    (\"ppc32\", \"r29\"): 132,\n    (\"ppc32\", \"gpr30\"): 136,\n    (\"ppc32\", \"r30\"): 136,\n    (\"ppc32\", \"gpr31\"): 140,\n    (\"ppc32\", \"r31\"): 140,\n    (\"ppc32\", \"bp\"): 140,\n    (\"ppc32\", \"vsr0\"): 144,\n    (\"ppc32\", \"v0\"): 144,\n    (\"ppc32\", \"fpr0\"): 144,\n    (\"ppc32\", \"vsr1\"): 160,\n    (\"ppc32\", \"v1\"): 160,\n    (\"ppc32\", \"fpr1\"): 160,\n    (\"ppc32\", \"vsr2\"): 176,\n    (\"ppc32\", \"v2\"): 176,\n    (\"ppc32\", \"fpr2\"): 176,\n    (\"ppc32\", \"vsr3\"): 192,\n    (\"ppc32\", \"v3\"): 192,\n    (\"ppc32\", \"fpr3\"): 192,\n    (\"ppc32\", \"vsr4\"): 208,\n    (\"ppc32\", \"v4\"): 208,\n    (\"ppc32\", \"fpr4\"): 208,\n    (\"ppc32\", \"vsr5\"): 224,\n    (\"ppc32\", \"v5\"): 224,\n    (\"ppc32\", \"fpr5\"): 224,\n    (\"ppc32\", \"vsr6\"): 240,\n    (\"ppc32\", \"v6\"): 240,\n    (\"ppc32\", \"fpr6\"): 240,\n    (\"ppc32\", \"vsr7\"): 256,\n    (\"ppc32\", \"v7\"): 256,\n    (\"ppc32\", \"fpr7\"): 256,\n    (\"ppc32\", \"vsr8\"): 272,\n    (\"ppc32\", \"v8\"): 272,\n    (\"ppc32\", \"fpr8\"): 272,\n    (\"ppc32\", \"vsr9\"): 288,\n    (\"ppc32\", \"v9\"): 288,\n    (\"ppc32\", \"fpr9\"): 288,\n    (\"ppc32\", \"vsr10\"): 304,\n    (\"ppc32\", \"v10\"): 304,\n    (\"ppc32\", \"fpr10\"): 304,\n    (\"ppc32\", \"vsr11\"): 320,\n    (\"ppc32\", \"v11\"): 320,\n    (\"ppc32\", \"fpr11\"): 320,\n    (\"ppc32\", \"vsr12\"): 336,\n    (\"ppc32\", \"v12\"): 336,\n    (\"ppc32\", \"fpr12\"): 336,\n    (\"ppc32\", \"vsr13\"): 352,\n    (\"ppc32\", \"v13\"): 352,\n    (\"ppc32\", \"fpr13\"): 352,\n    (\"ppc32\", \"vsr14\"): 368,\n    (\"ppc32\", \"v14\"): 368,\n    (\"ppc32\", \"fpr14\"): 368,\n    (\"ppc32\", \"vsr15\"): 384,\n    (\"ppc32\", \"v15\"): 384,\n    
(\"ppc32\", \"fpr15\"): 384,\n    (\"ppc32\", \"vsr16\"): 400,\n    (\"ppc32\", \"v16\"): 400,\n    (\"ppc32\", \"fpr16\"): 400,\n    (\"ppc32\", \"vsr17\"): 416,\n    (\"ppc32\", \"v17\"): 416,\n    (\"ppc32\", \"fpr17\"): 416,\n    (\"ppc32\", \"vsr18\"): 432,\n    (\"ppc32\", \"v18\"): 432,\n    (\"ppc32\", \"fpr18\"): 432,\n    (\"ppc32\", \"vsr19\"): 448,\n    (\"ppc32\", \"v19\"): 448,\n    (\"ppc32\", \"fpr19\"): 448,\n    (\"ppc32\", \"vsr20\"): 464,\n    (\"ppc32\", \"v20\"): 464,\n    (\"ppc32\", \"fpr20\"): 464,\n    (\"ppc32\", \"vsr21\"): 480,\n    (\"ppc32\", \"v21\"): 480,\n    (\"ppc32\", \"fpr21\"): 480,\n    (\"ppc32\", \"vsr22\"): 496,\n    (\"ppc32\", \"v22\"): 496,\n    (\"ppc32\", \"fpr22\"): 496,\n    (\"ppc32\", \"vsr23\"): 512,\n    (\"ppc32\", \"v23\"): 512,\n    (\"ppc32\", \"fpr23\"): 512,\n    (\"ppc32\", \"vsr24\"): 528,\n    (\"ppc32\", \"v24\"): 528,\n    (\"ppc32\", \"fpr24\"): 528,\n    (\"ppc32\", \"vsr25\"): 544,\n    (\"ppc32\", \"v25\"): 544,\n    (\"ppc32\", \"fpr25\"): 544,\n    (\"ppc32\", \"vsr26\"): 560,\n    (\"ppc32\", \"v26\"): 560,\n    (\"ppc32\", \"fpr26\"): 560,\n    (\"ppc32\", \"vsr27\"): 576,\n    (\"ppc32\", \"v27\"): 576,\n    (\"ppc32\", \"fpr27\"): 576,\n    (\"ppc32\", \"vsr28\"): 592,\n    (\"ppc32\", \"v28\"): 592,\n    (\"ppc32\", \"fpr28\"): 592,\n    (\"ppc32\", \"vsr29\"): 608,\n    (\"ppc32\", \"v29\"): 608,\n    (\"ppc32\", \"fpr29\"): 608,\n    (\"ppc32\", \"vsr30\"): 624,\n    (\"ppc32\", \"v30\"): 624,\n    (\"ppc32\", \"fpr30\"): 624,\n    (\"ppc32\", \"vsr31\"): 640,\n    (\"ppc32\", \"v31\"): 640,\n    (\"ppc32\", \"fpr31\"): 640,\n    (\"ppc32\", \"vsr32\"): 656,\n    (\"ppc32\", \"v32\"): 656,\n    (\"ppc32\", \"vsr33\"): 672,\n    (\"ppc32\", \"v33\"): 672,\n    (\"ppc32\", \"vsr34\"): 688,\n    (\"ppc32\", \"v34\"): 688,\n    (\"ppc32\", \"vsr35\"): 704,\n    (\"ppc32\", \"v35\"): 704,\n    (\"ppc32\", \"vsr36\"): 720,\n    (\"ppc32\", \"v36\"): 720,\n    (\"ppc32\", \"vsr37\"): 736,\n    
(\"ppc32\", \"v37\"): 736,\n    (\"ppc32\", \"vsr38\"): 752,\n    (\"ppc32\", \"v38\"): 752,\n    (\"ppc32\", \"vsr39\"): 768,\n    (\"ppc32\", \"v39\"): 768,\n    (\"ppc32\", \"vsr40\"): 784,\n    (\"ppc32\", \"v40\"): 784,\n    (\"ppc32\", \"vsr41\"): 800,\n    (\"ppc32\", \"v41\"): 800,\n    (\"ppc32\", \"vsr42\"): 816,\n    (\"ppc32\", \"v42\"): 816,\n    (\"ppc32\", \"vsr43\"): 832,\n    (\"ppc32\", \"v43\"): 832,\n    (\"ppc32\", \"vsr44\"): 848,\n    (\"ppc32\", \"v44\"): 848,\n    (\"ppc32\", \"vsr45\"): 864,\n    (\"ppc32\", \"v45\"): 864,\n    (\"ppc32\", \"vsr46\"): 880,\n    (\"ppc32\", \"v46\"): 880,\n    (\"ppc32\", \"vsr47\"): 896,\n    (\"ppc32\", \"v47\"): 896,\n    (\"ppc32\", \"vsr48\"): 912,\n    (\"ppc32\", \"v48\"): 912,\n    (\"ppc32\", \"vsr49\"): 928,\n    (\"ppc32\", \"v49\"): 928,\n    (\"ppc32\", \"vsr50\"): 944,\n    (\"ppc32\", \"v50\"): 944,\n    (\"ppc32\", \"vsr51\"): 960,\n    (\"ppc32\", \"v51\"): 960,\n    (\"ppc32\", \"vsr52\"): 976,\n    (\"ppc32\", \"v52\"): 976,\n    (\"ppc32\", \"vsr53\"): 992,\n    (\"ppc32\", \"v53\"): 992,\n    (\"ppc32\", \"vsr54\"): 1008,\n    (\"ppc32\", \"v54\"): 1008,\n    (\"ppc32\", \"vsr55\"): 1024,\n    (\"ppc32\", \"v55\"): 1024,\n    (\"ppc32\", \"vsr56\"): 1040,\n    (\"ppc32\", \"v56\"): 1040,\n    (\"ppc32\", \"vsr57\"): 1056,\n    (\"ppc32\", \"v57\"): 1056,\n    (\"ppc32\", \"vsr58\"): 1072,\n    (\"ppc32\", \"v58\"): 1072,\n    (\"ppc32\", \"vsr59\"): 1088,\n    (\"ppc32\", \"v59\"): 1088,\n    (\"ppc32\", \"vsr60\"): 1104,\n    (\"ppc32\", \"v60\"): 1104,\n    (\"ppc32\", \"vsr61\"): 1120,\n    (\"ppc32\", \"v61\"): 1120,\n    (\"ppc32\", \"vsr62\"): 1136,\n    (\"ppc32\", \"v62\"): 1136,\n    (\"ppc32\", \"vsr63\"): 1152,\n    (\"ppc32\", \"v63\"): 1152,\n    (\"ppc32\", \"cia\"): 1168,\n    (\"ppc32\", \"ip\"): 1168,\n    (\"ppc32\", \"pc\"): 1168,\n    (\"ppc32\", \"lr\"): 1172,\n    (\"ppc32\", \"ctr\"): 1176,\n    (\"ppc32\", \"xer_so\"): 1180,\n    (\"ppc32\", \"xer_ov\"): 1181,\n  
  (\"ppc32\", \"xer_ca\"): 1182,\n    (\"ppc32\", \"xer_bc\"): 1183,\n    (\"ppc32\", \"cr0_321\"): 1184,\n    (\"ppc32\", \"cr0_0\"): 1185,\n    (\"ppc32\", \"cr0\"): 1185,\n    (\"ppc32\", \"cr1_321\"): 1186,\n    (\"ppc32\", \"cr1_0\"): 1187,\n    (\"ppc32\", \"cr1\"): 1187,\n    (\"ppc32\", \"cr2_321\"): 1188,\n    (\"ppc32\", \"cr2_0\"): 1189,\n    (\"ppc32\", \"cr2\"): 1189,\n    (\"ppc32\", \"cr3_321\"): 1190,\n    (\"ppc32\", \"cr3_0\"): 1191,\n    (\"ppc32\", \"cr3\"): 1191,\n    (\"ppc32\", \"cr4_321\"): 1192,\n    (\"ppc32\", \"cr4_0\"): 1193,\n    (\"ppc32\", \"cr4\"): 1193,\n    (\"ppc32\", \"cr5_321\"): 1194,\n    (\"ppc32\", \"cr5_0\"): 1195,\n    (\"ppc32\", \"cr5\"): 1195,\n    (\"ppc32\", \"cr6_321\"): 1196,\n    (\"ppc32\", \"cr6_0\"): 1197,\n    (\"ppc32\", \"cr6\"): 1197,\n    (\"ppc32\", \"cr7_321\"): 1198,\n    (\"ppc32\", \"cr7_0\"): 1199,\n    (\"ppc32\", \"cr7\"): 1199,\n    (\"ppc32\", \"fpround\"): 1200,\n    (\"ppc32\", \"dfpround\"): 1201,\n    (\"ppc32\", \"c_fpcc\"): 1202,\n    (\"ppc32\", \"vrsave\"): 1204,\n    (\"ppc32\", \"vscr\"): 1208,\n    (\"ppc32\", \"emnote\"): 1212,\n    (\"ppc32\", \"cmstart\"): 1216,\n    (\"ppc32\", \"cmlen\"): 1220,\n    (\"ppc32\", \"nraddr\"): 1224,\n    (\"ppc32\", \"nraddr_gpr2\"): 1228,\n    (\"ppc32\", \"redir_sp\"): 1232,\n    (\"ppc32\", \"redir_stack\"): 1236,\n    (\"ppc32\", \"ip_at_syscall\"): 1364,\n    (\"ppc32\", \"sprg3_ro\"): 1368,\n    (\"ppc32\", \"tfhar\"): 1376,\n    (\"ppc32\", \"texasr\"): 1384,\n    (\"ppc32\", \"tfiar\"): 1392,\n    (\"ppc32\", \"ppr\"): 1400,\n    (\"ppc32\", \"texasru\"): 1408,\n    (\"ppc32\", \"pspb\"): 1412,\n    (\"ppc64\", \"gpr0\"): 16,\n    (\"ppc64\", \"r0\"): 16,\n    (\"ppc64\", \"gpr1\"): 24,\n    (\"ppc64\", \"r1\"): 24,\n    (\"ppc64\", \"sp\"): 24,\n    (\"ppc64\", \"gpr2\"): 32,\n    (\"ppc64\", \"r2\"): 32,\n    (\"ppc64\", \"rtoc\"): 32,\n    (\"ppc64\", \"gpr3\"): 40,\n    (\"ppc64\", \"r3\"): 40,\n    (\"ppc64\", \"gpr4\"): 48,\n    
(\"ppc64\", \"r4\"): 48,\n    (\"ppc64\", \"gpr5\"): 56,\n    (\"ppc64\", \"r5\"): 56,\n    (\"ppc64\", \"gpr6\"): 64,\n    (\"ppc64\", \"r6\"): 64,\n    (\"ppc64\", \"gpr7\"): 72,\n    (\"ppc64\", \"r7\"): 72,\n    (\"ppc64\", \"gpr8\"): 80,\n    (\"ppc64\", \"r8\"): 80,\n    (\"ppc64\", \"gpr9\"): 88,\n    (\"ppc64\", \"r9\"): 88,\n    (\"ppc64\", \"gpr10\"): 96,\n    (\"ppc64\", \"r10\"): 96,\n    (\"ppc64\", \"gpr11\"): 104,\n    (\"ppc64\", \"r11\"): 104,\n    (\"ppc64\", \"gpr12\"): 112,\n    (\"ppc64\", \"r12\"): 112,\n    (\"ppc64\", \"gpr13\"): 120,\n    (\"ppc64\", \"r13\"): 120,\n    (\"ppc64\", \"gpr14\"): 128,\n    (\"ppc64\", \"r14\"): 128,\n    (\"ppc64\", \"gpr15\"): 136,\n    (\"ppc64\", \"r15\"): 136,\n    (\"ppc64\", \"gpr16\"): 144,\n    (\"ppc64\", \"r16\"): 144,\n    (\"ppc64\", \"gpr17\"): 152,\n    (\"ppc64\", \"r17\"): 152,\n    (\"ppc64\", \"gpr18\"): 160,\n    (\"ppc64\", \"r18\"): 160,\n    (\"ppc64\", \"gpr19\"): 168,\n    (\"ppc64\", \"r19\"): 168,\n    (\"ppc64\", \"gpr20\"): 176,\n    (\"ppc64\", \"r20\"): 176,\n    (\"ppc64\", \"gpr21\"): 184,\n    (\"ppc64\", \"r21\"): 184,\n    (\"ppc64\", \"gpr22\"): 192,\n    (\"ppc64\", \"r22\"): 192,\n    (\"ppc64\", \"gpr23\"): 200,\n    (\"ppc64\", \"r23\"): 200,\n    (\"ppc64\", \"gpr24\"): 208,\n    (\"ppc64\", \"r24\"): 208,\n    (\"ppc64\", \"gpr25\"): 216,\n    (\"ppc64\", \"r25\"): 216,\n    (\"ppc64\", \"gpr26\"): 224,\n    (\"ppc64\", \"r26\"): 224,\n    (\"ppc64\", \"gpr27\"): 232,\n    (\"ppc64\", \"r27\"): 232,\n    (\"ppc64\", \"gpr28\"): 240,\n    (\"ppc64\", \"r28\"): 240,\n    (\"ppc64\", \"gpr29\"): 248,\n    (\"ppc64\", \"r29\"): 248,\n    (\"ppc64\", \"gpr30\"): 256,\n    (\"ppc64\", \"r30\"): 256,\n    (\"ppc64\", \"gpr31\"): 264,\n    (\"ppc64\", \"r31\"): 264,\n    (\"ppc64\", \"bp\"): 264,\n    (\"ppc64\", \"vsr0\"): 272,\n    (\"ppc64\", \"v0\"): 272,\n    (\"ppc64\", \"fpr0\"): 272,\n    (\"ppc64\", \"vsr1\"): 288,\n    (\"ppc64\", \"v1\"): 288,\n    (\"ppc64\", 
\"fpr1\"): 288,\n    (\"ppc64\", \"vsr2\"): 304,\n    (\"ppc64\", \"v2\"): 304,\n    (\"ppc64\", \"fpr2\"): 304,\n    (\"ppc64\", \"vsr3\"): 320,\n    (\"ppc64\", \"v3\"): 320,\n    (\"ppc64\", \"fpr3\"): 320,\n    (\"ppc64\", \"vsr4\"): 336,\n    (\"ppc64\", \"v4\"): 336,\n    (\"ppc64\", \"fpr4\"): 336,\n    (\"ppc64\", \"vsr5\"): 352,\n    (\"ppc64\", \"v5\"): 352,\n    (\"ppc64\", \"fpr5\"): 352,\n    (\"ppc64\", \"vsr6\"): 368,\n    (\"ppc64\", \"v6\"): 368,\n    (\"ppc64\", \"fpr6\"): 368,\n    (\"ppc64\", \"vsr7\"): 384,\n    (\"ppc64\", \"v7\"): 384,\n    (\"ppc64\", \"fpr7\"): 384,\n    (\"ppc64\", \"vsr8\"): 400,\n    (\"ppc64\", \"v8\"): 400,\n    (\"ppc64\", \"fpr8\"): 400,\n    (\"ppc64\", \"vsr9\"): 416,\n    (\"ppc64\", \"v9\"): 416,\n    (\"ppc64\", \"fpr9\"): 416,\n    (\"ppc64\", \"vsr10\"): 432,\n    (\"ppc64\", \"v10\"): 432,\n    (\"ppc64\", \"fpr10\"): 432,\n    (\"ppc64\", \"vsr11\"): 448,\n    (\"ppc64\", \"v11\"): 448,\n    (\"ppc64\", \"fpr11\"): 448,\n    (\"ppc64\", \"vsr12\"): 464,\n    (\"ppc64\", \"v12\"): 464,\n    (\"ppc64\", \"fpr12\"): 464,\n    (\"ppc64\", \"vsr13\"): 480,\n    (\"ppc64\", \"v13\"): 480,\n    (\"ppc64\", \"fpr13\"): 480,\n    (\"ppc64\", \"vsr14\"): 496,\n    (\"ppc64\", \"v14\"): 496,\n    (\"ppc64\", \"fpr14\"): 496,\n    (\"ppc64\", \"vsr15\"): 512,\n    (\"ppc64\", \"v15\"): 512,\n    (\"ppc64\", \"fpr15\"): 512,\n    (\"ppc64\", \"vsr16\"): 528,\n    (\"ppc64\", \"v16\"): 528,\n    (\"ppc64\", \"fpr16\"): 528,\n    (\"ppc64\", \"vsr17\"): 544,\n    (\"ppc64\", \"v17\"): 544,\n    (\"ppc64\", \"fpr17\"): 544,\n    (\"ppc64\", \"vsr18\"): 560,\n    (\"ppc64\", \"v18\"): 560,\n    (\"ppc64\", \"fpr18\"): 560,\n    (\"ppc64\", \"vsr19\"): 576,\n    (\"ppc64\", \"v19\"): 576,\n    (\"ppc64\", \"fpr19\"): 576,\n    (\"ppc64\", \"vsr20\"): 592,\n    (\"ppc64\", \"v20\"): 592,\n    (\"ppc64\", \"fpr20\"): 592,\n    (\"ppc64\", \"vsr21\"): 608,\n    (\"ppc64\", \"v21\"): 608,\n    (\"ppc64\", \"fpr21\"): 608,\n    
(\"ppc64\", \"vsr22\"): 624,\n    (\"ppc64\", \"v22\"): 624,\n    (\"ppc64\", \"fpr22\"): 624,\n    (\"ppc64\", \"vsr23\"): 640,\n    (\"ppc64\", \"v23\"): 640,\n    (\"ppc64\", \"fpr23\"): 640,\n    (\"ppc64\", \"vsr24\"): 656,\n    (\"ppc64\", \"v24\"): 656,\n    (\"ppc64\", \"fpr24\"): 656,\n    (\"ppc64\", \"vsr25\"): 672,\n    (\"ppc64\", \"v25\"): 672,\n    (\"ppc64\", \"fpr25\"): 672,\n    (\"ppc64\", \"vsr26\"): 688,\n    (\"ppc64\", \"v26\"): 688,\n    (\"ppc64\", \"fpr26\"): 688,\n    (\"ppc64\", \"vsr27\"): 704,\n    (\"ppc64\", \"v27\"): 704,\n    (\"ppc64\", \"fpr27\"): 704,\n    (\"ppc64\", \"vsr28\"): 720,\n    (\"ppc64\", \"v28\"): 720,\n    (\"ppc64\", \"fpr28\"): 720,\n    (\"ppc64\", \"vsr29\"): 736,\n    (\"ppc64\", \"v29\"): 736,\n    (\"ppc64\", \"fpr29\"): 736,\n    (\"ppc64\", \"vsr30\"): 752,\n    (\"ppc64\", \"v30\"): 752,\n    (\"ppc64\", \"fpr30\"): 752,\n    (\"ppc64\", \"vsr31\"): 768,\n    (\"ppc64\", \"v31\"): 768,\n    (\"ppc64\", \"fpr31\"): 768,\n    (\"ppc64\", \"vsr32\"): 784,\n    (\"ppc64\", \"v32\"): 784,\n    (\"ppc64\", \"vsr33\"): 800,\n    (\"ppc64\", \"v33\"): 800,\n    (\"ppc64\", \"vsr34\"): 816,\n    (\"ppc64\", \"v34\"): 816,\n    (\"ppc64\", \"vsr35\"): 832,\n    (\"ppc64\", \"v35\"): 832,\n    (\"ppc64\", \"vsr36\"): 848,\n    (\"ppc64\", \"v36\"): 848,\n    (\"ppc64\", \"vsr37\"): 864,\n    (\"ppc64\", \"v37\"): 864,\n    (\"ppc64\", \"vsr38\"): 880,\n    (\"ppc64\", \"v38\"): 880,\n    (\"ppc64\", \"vsr39\"): 896,\n    (\"ppc64\", \"v39\"): 896,\n    (\"ppc64\", \"vsr40\"): 912,\n    (\"ppc64\", \"v40\"): 912,\n    (\"ppc64\", \"vsr41\"): 928,\n    (\"ppc64\", \"v41\"): 928,\n    (\"ppc64\", \"vsr42\"): 944,\n    (\"ppc64\", \"v42\"): 944,\n    (\"ppc64\", \"vsr43\"): 960,\n    (\"ppc64\", \"v43\"): 960,\n    (\"ppc64\", \"vsr44\"): 976,\n    (\"ppc64\", \"v44\"): 976,\n    (\"ppc64\", \"vsr45\"): 992,\n    (\"ppc64\", \"v45\"): 992,\n    (\"ppc64\", \"vsr46\"): 1008,\n    (\"ppc64\", \"v46\"): 1008,\n    
(\"ppc64\", \"vsr47\"): 1024,\n    (\"ppc64\", \"v47\"): 1024,\n    (\"ppc64\", \"vsr48\"): 1040,\n    (\"ppc64\", \"v48\"): 1040,\n    (\"ppc64\", \"vsr49\"): 1056,\n    (\"ppc64\", \"v49\"): 1056,\n    (\"ppc64\", \"vsr50\"): 1072,\n    (\"ppc64\", \"v50\"): 1072,\n    (\"ppc64\", \"vsr51\"): 1088,\n    (\"ppc64\", \"v51\"): 1088,\n    (\"ppc64\", \"vsr52\"): 1104,\n    (\"ppc64\", \"v52\"): 1104,\n    (\"ppc64\", \"vsr53\"): 1120,\n    (\"ppc64\", \"v53\"): 1120,\n    (\"ppc64\", \"vsr54\"): 1136,\n    (\"ppc64\", \"v54\"): 1136,\n    (\"ppc64\", \"vsr55\"): 1152,\n    (\"ppc64\", \"v55\"): 1152,\n    (\"ppc64\", \"vsr56\"): 1168,\n    (\"ppc64\", \"v56\"): 1168,\n    (\"ppc64\", \"vsr57\"): 1184,\n    (\"ppc64\", \"v57\"): 1184,\n    (\"ppc64\", \"vsr58\"): 1200,\n    (\"ppc64\", \"v58\"): 1200,\n    (\"ppc64\", \"vsr59\"): 1216,\n    (\"ppc64\", \"v59\"): 1216,\n    (\"ppc64\", \"vsr60\"): 1232,\n    (\"ppc64\", \"v60\"): 1232,\n    (\"ppc64\", \"vsr61\"): 1248,\n    (\"ppc64\", \"v61\"): 1248,\n    (\"ppc64\", \"vsr62\"): 1264,\n    (\"ppc64\", \"v62\"): 1264,\n    (\"ppc64\", \"vsr63\"): 1280,\n    (\"ppc64\", \"v63\"): 1280,\n    (\"ppc64\", \"cia\"): 1296,\n    (\"ppc64\", \"ip\"): 1296,\n    (\"ppc64\", \"pc\"): 1296,\n    (\"ppc64\", \"lr\"): 1304,\n    (\"ppc64\", \"ctr\"): 1312,\n    (\"ppc64\", \"xer_so\"): 1320,\n    (\"ppc64\", \"xer_ov\"): 1321,\n    (\"ppc64\", \"xer_ca\"): 1322,\n    (\"ppc64\", \"xer_bc\"): 1323,\n    (\"ppc64\", \"cr0_321\"): 1324,\n    (\"ppc64\", \"cr0_0\"): 1325,\n    (\"ppc64\", \"cr0\"): 1325,\n    (\"ppc64\", \"cr1_321\"): 1326,\n    (\"ppc64\", \"cr1_0\"): 1327,\n    (\"ppc64\", \"cr1\"): 1327,\n    (\"ppc64\", \"cr2_321\"): 1328,\n    (\"ppc64\", \"cr2_0\"): 1329,\n    (\"ppc64\", \"cr2\"): 1329,\n    (\"ppc64\", \"cr3_321\"): 1330,\n    (\"ppc64\", \"cr3_0\"): 1331,\n    (\"ppc64\", \"cr3\"): 1331,\n    (\"ppc64\", \"cr4_321\"): 1332,\n    (\"ppc64\", \"cr4_0\"): 1333,\n    (\"ppc64\", \"cr4\"): 1333,\n    (\"ppc64\", 
\"cr5_321\"): 1334,\n    (\"ppc64\", \"cr5_0\"): 1335,\n    (\"ppc64\", \"cr5\"): 1335,\n    (\"ppc64\", \"cr6_321\"): 1336,\n    (\"ppc64\", \"cr6_0\"): 1337,\n    (\"ppc64\", \"cr6\"): 1337,\n    (\"ppc64\", \"cr7_321\"): 1338,\n    (\"ppc64\", \"cr7_0\"): 1339,\n    (\"ppc64\", \"cr7\"): 1339,\n    (\"ppc64\", \"fpround\"): 1340,\n    (\"ppc64\", \"dfpround\"): 1341,\n    (\"ppc64\", \"c_fpcc\"): 1342,\n    (\"ppc64\", \"vrsave\"): 1344,\n    (\"ppc64\", \"vscr\"): 1348,\n    (\"ppc64\", \"emnote\"): 1352,\n    (\"ppc64\", \"cmstart\"): 1360,\n    (\"ppc64\", \"cmlen\"): 1368,\n    (\"ppc64\", \"nraddr\"): 1376,\n    (\"ppc64\", \"nraddr_gpr2\"): 1384,\n    (\"ppc64\", \"redir_sp\"): 1392,\n    (\"ppc64\", \"redir_stack\"): 1400,\n    (\"ppc64\", \"ip_at_syscall\"): 1656,\n    (\"ppc64\", \"sprg3_ro\"): 1664,\n    (\"ppc64\", \"tfhar\"): 1672,\n    (\"ppc64\", \"texasr\"): 1680,\n    (\"ppc64\", \"tfiar\"): 1688,\n    (\"ppc64\", \"ppr\"): 1696,\n    (\"ppc64\", \"texasru\"): 1704,\n    (\"ppc64\", \"pspb\"): 1708,\n    (\"s390x\", \"ia\"): 720,\n    (\"s390x\", \"ip\"): 720,\n    (\"s390x\", \"pc\"): 720,\n    (\"s390x\", \"r0\"): 576,\n    (\"s390x\", \"r1\"): 584,\n    (\"s390x\", \"r1_32\"): 588,\n    (\"s390x\", \"r2\"): 592,\n    (\"s390x\", \"r2_32\"): 596,\n    (\"s390x\", \"r3\"): 600,\n    (\"s390x\", \"r3_32\"): 604,\n    (\"s390x\", \"r4\"): 608,\n    (\"s390x\", \"r4_32\"): 612,\n    (\"s390x\", \"r5\"): 616,\n    (\"s390x\", \"r5_32\"): 620,\n    (\"s390x\", \"r6\"): 624,\n    (\"s390x\", \"r6_32\"): 628,\n    (\"s390x\", \"r7\"): 632,\n    (\"s390x\", \"r7_32\"): 636,\n    (\"s390x\", \"r8\"): 640,\n    (\"s390x\", \"r8_32\"): 644,\n    (\"s390x\", \"r9\"): 648,\n    (\"s390x\", \"r9_32\"): 652,\n    (\"s390x\", \"r10\"): 656,\n    (\"s390x\", \"r10_32\"): 660,\n    (\"s390x\", \"r11\"): 664,\n    (\"s390x\", \"bp\"): 664,\n    (\"s390x\", \"r11_32\"): 668,\n    (\"s390x\", \"r12\"): 672,\n    (\"s390x\", \"r12_32\"): 676,\n    (\"s390x\", 
\"r13\"): 680,\n    (\"s390x\", \"r13_32\"): 684,\n    (\"s390x\", \"r14\"): 688,\n    (\"s390x\", \"lr\"): 688,\n    (\"s390x\", \"r15\"): 696,\n    (\"s390x\", \"sp\"): 696,\n    (\"s390x\", \"v0\"): 64,\n    (\"s390x\", \"f0\"): 64,\n    (\"s390x\", \"v1\"): 80,\n    (\"s390x\", \"f1\"): 80,\n    (\"s390x\", \"v2\"): 96,\n    (\"s390x\", \"f2\"): 96,\n    (\"s390x\", \"v3\"): 112,\n    (\"s390x\", \"f3\"): 112,\n    (\"s390x\", \"v4\"): 128,\n    (\"s390x\", \"f4\"): 128,\n    (\"s390x\", \"v5\"): 144,\n    (\"s390x\", \"f5\"): 144,\n    (\"s390x\", \"v6\"): 160,\n    (\"s390x\", \"f6\"): 160,\n    (\"s390x\", \"v7\"): 176,\n    (\"s390x\", \"f7\"): 176,\n    (\"s390x\", \"v8\"): 192,\n    (\"s390x\", \"f8\"): 192,\n    (\"s390x\", \"v9\"): 208,\n    (\"s390x\", \"f9\"): 208,\n    (\"s390x\", \"v10\"): 224,\n    (\"s390x\", \"f10\"): 224,\n    (\"s390x\", \"v11\"): 240,\n    (\"s390x\", \"f11\"): 240,\n    (\"s390x\", \"v12\"): 256,\n    (\"s390x\", \"f12\"): 256,\n    (\"s390x\", \"v13\"): 272,\n    (\"s390x\", \"f13\"): 272,\n    (\"s390x\", \"v14\"): 288,\n    (\"s390x\", \"f14\"): 288,\n    (\"s390x\", \"v15\"): 304,\n    (\"s390x\", \"f15\"): 304,\n    (\"s390x\", \"v16\"): 320,\n    (\"s390x\", \"v17\"): 336,\n    (\"s390x\", \"v18\"): 352,\n    (\"s390x\", \"v19\"): 368,\n    (\"s390x\", \"v20\"): 384,\n    (\"s390x\", \"v21\"): 400,\n    (\"s390x\", \"v22\"): 416,\n    (\"s390x\", \"v23\"): 432,\n    (\"s390x\", \"v24\"): 448,\n    (\"s390x\", \"v25\"): 464,\n    (\"s390x\", \"v26\"): 480,\n    (\"s390x\", \"v27\"): 496,\n    (\"s390x\", \"v28\"): 512,\n    (\"s390x\", \"v29\"): 528,\n    (\"s390x\", \"v30\"): 544,\n    (\"s390x\", \"v31\"): 560,\n    (\"s390x\", \"a0\"): 0,\n    (\"s390x\", \"a1\"): 4,\n    (\"s390x\", \"a2\"): 8,\n    (\"s390x\", \"a3\"): 12,\n    (\"s390x\", \"a4\"): 16,\n    (\"s390x\", \"a5\"): 20,\n    (\"s390x\", \"a6\"): 24,\n    (\"s390x\", \"a7\"): 28,\n    (\"s390x\", \"a8\"): 32,\n    (\"s390x\", \"a9\"): 36,\n    (\"s390x\", 
\"a10\"): 40,\n    (\"s390x\", \"a11\"): 44,\n    (\"s390x\", \"a12\"): 48,\n    (\"s390x\", \"a13\"): 52,\n    (\"s390x\", \"a14\"): 56,\n    (\"s390x\", \"a15\"): 60,\n    (\"s390x\", \"nraddr\"): 768,\n    (\"s390x\", \"cmstart\"): 776,\n    (\"s390x\", \"cmlen\"): 784,\n    (\"s390x\", \"ip_at_syscall\"): 792,\n    (\"s390x\", \"emnote\"): 800,\n    (\"mips32\", \"zero\"): 8,\n    (\"mips32\", \"r0\"): 8,\n    (\"mips32\", \"at\"): 12,\n    (\"mips32\", \"r1\"): 12,\n    (\"mips32\", \"v0\"): 16,\n    (\"mips32\", \"r2\"): 16,\n    (\"mips32\", \"v1\"): 20,\n    (\"mips32\", \"r3\"): 20,\n    (\"mips32\", \"a0\"): 24,\n    (\"mips32\", \"r4\"): 24,\n    (\"mips32\", \"a1\"): 28,\n    (\"mips32\", \"r5\"): 28,\n    (\"mips32\", \"a2\"): 32,\n    (\"mips32\", \"r6\"): 32,\n    (\"mips32\", \"a3\"): 36,\n    (\"mips32\", \"r7\"): 36,\n    (\"mips32\", \"t0\"): 40,\n    (\"mips32\", \"r8\"): 40,\n    (\"mips32\", \"t1\"): 44,\n    (\"mips32\", \"r9\"): 44,\n    (\"mips32\", \"t2\"): 48,\n    (\"mips32\", \"r10\"): 48,\n    (\"mips32\", \"t3\"): 52,\n    (\"mips32\", \"r11\"): 52,\n    (\"mips32\", \"t4\"): 56,\n    (\"mips32\", \"r12\"): 56,\n    (\"mips32\", \"t5\"): 60,\n    (\"mips32\", \"r13\"): 60,\n    (\"mips32\", \"t6\"): 64,\n    (\"mips32\", \"r14\"): 64,\n    (\"mips32\", \"t7\"): 68,\n    (\"mips32\", \"r15\"): 68,\n    (\"mips32\", \"s0\"): 72,\n    (\"mips32\", \"r16\"): 72,\n    (\"mips32\", \"s1\"): 76,\n    (\"mips32\", \"r17\"): 76,\n    (\"mips32\", \"s2\"): 80,\n    (\"mips32\", \"r18\"): 80,\n    (\"mips32\", \"s3\"): 84,\n    (\"mips32\", \"r19\"): 84,\n    (\"mips32\", \"s4\"): 88,\n    (\"mips32\", \"r20\"): 88,\n    (\"mips32\", \"s5\"): 92,\n    (\"mips32\", \"r21\"): 92,\n    (\"mips32\", \"s6\"): 96,\n    (\"mips32\", \"r22\"): 96,\n    (\"mips32\", \"s7\"): 100,\n    (\"mips32\", \"r23\"): 100,\n    (\"mips32\", \"t8\"): 104,\n    (\"mips32\", \"r24\"): 104,\n    (\"mips32\", \"t9\"): 108,\n    (\"mips32\", \"r25\"): 108,\n    
(\"mips32\", \"k0\"): 112,\n    (\"mips32\", \"r26\"): 112,\n    (\"mips32\", \"k1\"): 116,\n    (\"mips32\", \"r27\"): 116,\n    (\"mips32\", \"gp\"): 120,\n    (\"mips32\", \"r28\"): 120,\n    (\"mips32\", \"sp\"): 124,\n    (\"mips32\", \"r29\"): 124,\n    (\"mips32\", \"s8\"): 128,\n    (\"mips32\", \"r30\"): 128,\n    (\"mips32\", \"fp\"): 128,\n    (\"mips32\", \"bp\"): 128,\n    (\"mips32\", \"ra\"): 132,\n    (\"mips32\", \"r31\"): 132,\n    (\"mips32\", \"lr\"): 132,\n    (\"mips32\", \"pc\"): 136,\n    (\"mips32\", \"ip\"): 136,\n    (\"mips32\", \"hi\"): 140,\n    (\"mips32\", \"lo\"): 144,\n    (\"mips32\", \"f0\"): 152,\n    (\"mips32\", \"f0_lo\"): 152,\n    (\"mips32\", \"f1\"): 160,\n    (\"mips32\", \"f1_lo\"): 160,\n    (\"mips32\", \"f2\"): 168,\n    (\"mips32\", \"f2_lo\"): 168,\n    (\"mips32\", \"f3\"): 176,\n    (\"mips32\", \"f3_lo\"): 176,\n    (\"mips32\", \"f4\"): 184,\n    (\"mips32\", \"f4_lo\"): 184,\n    (\"mips32\", \"f5\"): 192,\n    (\"mips32\", \"f5_lo\"): 192,\n    (\"mips32\", \"f6\"): 200,\n    (\"mips32\", \"f6_lo\"): 200,\n    (\"mips32\", \"f7\"): 208,\n    (\"mips32\", \"f7_lo\"): 208,\n    (\"mips32\", \"f8\"): 216,\n    (\"mips32\", \"f8_lo\"): 216,\n    (\"mips32\", \"f9\"): 224,\n    (\"mips32\", \"f9_lo\"): 224,\n    (\"mips32\", \"f10\"): 232,\n    (\"mips32\", \"f10_lo\"): 232,\n    (\"mips32\", \"f11\"): 240,\n    (\"mips32\", \"f11_lo\"): 240,\n    (\"mips32\", \"f12\"): 248,\n    (\"mips32\", \"f12_lo\"): 248,\n    (\"mips32\", \"f13\"): 256,\n    (\"mips32\", \"f13_lo\"): 256,\n    (\"mips32\", \"f14\"): 264,\n    (\"mips32\", \"f14_lo\"): 264,\n    (\"mips32\", \"f15\"): 272,\n    (\"mips32\", \"f15_lo\"): 272,\n    (\"mips32\", \"f16\"): 280,\n    (\"mips32\", \"f16_lo\"): 280,\n    (\"mips32\", \"f17\"): 288,\n    (\"mips32\", \"f17_lo\"): 288,\n    (\"mips32\", \"f18\"): 296,\n    (\"mips32\", \"f18_lo\"): 296,\n    (\"mips32\", \"f19\"): 304,\n    (\"mips32\", \"f19_lo\"): 304,\n    (\"mips32\", \"f20\"): 
312,\n    (\"mips32\", \"f20_lo\"): 312,\n    (\"mips32\", \"f21\"): 320,\n    (\"mips32\", \"f21_lo\"): 320,\n    (\"mips32\", \"f22\"): 328,\n    (\"mips32\", \"f22_lo\"): 328,\n    (\"mips32\", \"f23\"): 336,\n    (\"mips32\", \"f23_lo\"): 336,\n    (\"mips32\", \"f24\"): 344,\n    (\"mips32\", \"f24_lo\"): 344,\n    (\"mips32\", \"f25\"): 352,\n    (\"mips32\", \"f25_lo\"): 352,\n    (\"mips32\", \"f26\"): 360,\n    (\"mips32\", \"f26_lo\"): 360,\n    (\"mips32\", \"f27\"): 368,\n    (\"mips32\", \"f27_lo\"): 368,\n    (\"mips32\", \"f28\"): 376,\n    (\"mips32\", \"f28_lo\"): 376,\n    (\"mips32\", \"f29\"): 384,\n    (\"mips32\", \"f29_lo\"): 384,\n    (\"mips32\", \"f30\"): 392,\n    (\"mips32\", \"f30_lo\"): 392,\n    (\"mips32\", \"f31\"): 400,\n    (\"mips32\", \"f31_lo\"): 400,\n    (\"mips32\", \"fir\"): 408,\n    (\"mips32\", \"fccr\"): 412,\n    (\"mips32\", \"fexr\"): 416,\n    (\"mips32\", \"fenr\"): 420,\n    (\"mips32\", \"fcsr\"): 424,\n    (\"mips32\", \"ulr\"): 428,\n    (\"mips32\", \"emnote\"): 432,\n    (\"mips32\", \"cmstart\"): 436,\n    (\"mips32\", \"cmlen\"): 440,\n    (\"mips32\", \"nraddr\"): 444,\n    (\"mips32\", \"cond\"): 448,\n    (\"mips32\", \"dspcontrol\"): 452,\n    (\"mips32\", \"ac0\"): 456,\n    (\"mips32\", \"ac1\"): 464,\n    (\"mips32\", \"ac2\"): 472,\n    (\"mips32\", \"ac3\"): 480,\n    (\"mips32\", \"cp0_status\"): 488,\n    (\"mips32\", \"ip_at_syscall\"): 492,\n    (\"mips64\", \"zero\"): 16,\n    (\"mips64\", \"r0\"): 16,\n    (\"mips64\", \"at\"): 24,\n    (\"mips64\", \"r1\"): 24,\n    (\"mips64\", \"v0\"): 32,\n    (\"mips64\", \"r2\"): 32,\n    (\"mips64\", \"v1\"): 40,\n    (\"mips64\", \"r3\"): 40,\n    (\"mips64\", \"a0\"): 48,\n    (\"mips64\", \"r4\"): 48,\n    (\"mips64\", \"a1\"): 56,\n    (\"mips64\", \"r5\"): 56,\n    (\"mips64\", \"a2\"): 64,\n    (\"mips64\", \"r6\"): 64,\n    (\"mips64\", \"a3\"): 72,\n    (\"mips64\", \"r7\"): 72,\n    (\"mips64\", \"t0\"): 80,\n    (\"mips64\", \"r8\"): 80,\n    
(\"mips64\", \"a4\"): 80,\n    (\"mips64\", \"t1\"): 88,\n    (\"mips64\", \"r9\"): 88,\n    (\"mips64\", \"a5\"): 88,\n    (\"mips64\", \"t2\"): 96,\n    (\"mips64\", \"r10\"): 96,\n    (\"mips64\", \"a6\"): 96,\n    (\"mips64\", \"t3\"): 104,\n    (\"mips64\", \"r11\"): 104,\n    (\"mips64\", \"a7\"): 104,\n    (\"mips64\", \"t4\"): 112,\n    (\"mips64\", \"r12\"): 112,\n    (\"mips64\", \"t5\"): 120,\n    (\"mips64\", \"r13\"): 120,\n    (\"mips64\", \"t6\"): 128,\n    (\"mips64\", \"r14\"): 128,\n    (\"mips64\", \"t7\"): 136,\n    (\"mips64\", \"r15\"): 136,\n    (\"mips64\", \"s0\"): 144,\n    (\"mips64\", \"r16\"): 144,\n    (\"mips64\", \"s1\"): 152,\n    (\"mips64\", \"r17\"): 152,\n    (\"mips64\", \"s2\"): 160,\n    (\"mips64\", \"r18\"): 160,\n    (\"mips64\", \"s3\"): 168,\n    (\"mips64\", \"r19\"): 168,\n    (\"mips64\", \"s4\"): 176,\n    (\"mips64\", \"r20\"): 176,\n    (\"mips64\", \"s5\"): 184,\n    (\"mips64\", \"r21\"): 184,\n    (\"mips64\", \"s6\"): 192,\n    (\"mips64\", \"r22\"): 192,\n    (\"mips64\", \"s7\"): 200,\n    (\"mips64\", \"r23\"): 200,\n    (\"mips64\", \"t8\"): 208,\n    (\"mips64\", \"r24\"): 208,\n    (\"mips64\", \"t9\"): 216,\n    (\"mips64\", \"r25\"): 216,\n    (\"mips64\", \"k0\"): 224,\n    (\"mips64\", \"r26\"): 224,\n    (\"mips64\", \"k1\"): 232,\n    (\"mips64\", \"r27\"): 232,\n    (\"mips64\", \"gp\"): 240,\n    (\"mips64\", \"r28\"): 240,\n    (\"mips64\", \"sp\"): 248,\n    (\"mips64\", \"r29\"): 248,\n    (\"mips64\", \"s8\"): 256,\n    (\"mips64\", \"r30\"): 256,\n    (\"mips64\", \"fp\"): 256,\n    (\"mips64\", \"bp\"): 256,\n    (\"mips64\", \"ra\"): 264,\n    (\"mips64\", \"r31\"): 264,\n    (\"mips64\", \"lr\"): 264,\n    (\"mips64\", \"pc\"): 272,\n    (\"mips64\", \"ip\"): 272,\n    (\"mips64\", \"hi\"): 280,\n    (\"mips64\", \"lo\"): 288,\n    (\"mips64\", \"f0\"): 296,\n    (\"mips64\", \"f0_lo\"): 296,\n    (\"mips64\", \"f1\"): 304,\n    (\"mips64\", \"f1_lo\"): 304,\n    (\"mips64\", \"f2\"): 
312,\n    (\"mips64\", \"f2_lo\"): 312,\n    (\"mips64\", \"f3\"): 320,\n    (\"mips64\", \"f3_lo\"): 320,\n    (\"mips64\", \"f4\"): 328,\n    (\"mips64\", \"f4_lo\"): 328,\n    (\"mips64\", \"f5\"): 336,\n    (\"mips64\", \"f5_lo\"): 336,\n    (\"mips64\", \"f6\"): 344,\n    (\"mips64\", \"f6_lo\"): 344,\n    (\"mips64\", \"f7\"): 352,\n    (\"mips64\", \"f7_lo\"): 352,\n    (\"mips64\", \"f8\"): 360,\n    (\"mips64\", \"f8_lo\"): 360,\n    (\"mips64\", \"f9\"): 368,\n    (\"mips64\", \"f9_lo\"): 368,\n    (\"mips64\", \"f10\"): 376,\n    (\"mips64\", \"f10_lo\"): 376,\n    (\"mips64\", \"f11\"): 384,\n    (\"mips64\", \"f11_lo\"): 384,\n    (\"mips64\", \"f12\"): 392,\n    (\"mips64\", \"f12_lo\"): 392,\n    (\"mips64\", \"f13\"): 400,\n    (\"mips64\", \"f13_lo\"): 400,\n    (\"mips64\", \"f14\"): 408,\n    (\"mips64\", \"f14_lo\"): 408,\n    (\"mips64\", \"f15\"): 416,\n    (\"mips64\", \"f15_lo\"): 416,\n    (\"mips64\", \"f16\"): 424,\n    (\"mips64\", \"f16_lo\"): 424,\n    (\"mips64\", \"f17\"): 432,\n    (\"mips64\", \"f17_lo\"): 432,\n    (\"mips64\", \"f18\"): 440,\n    (\"mips64\", \"f18_lo\"): 440,\n    (\"mips64\", \"f19\"): 448,\n    (\"mips64\", \"f19_lo\"): 448,\n    (\"mips64\", \"f20\"): 456,\n    (\"mips64\", \"f20_lo\"): 456,\n    (\"mips64\", \"f21\"): 464,\n    (\"mips64\", \"f21_lo\"): 464,\n    (\"mips64\", \"f22\"): 472,\n    (\"mips64\", \"f22_lo\"): 472,\n    (\"mips64\", \"f23\"): 480,\n    (\"mips64\", \"f23_lo\"): 480,\n    (\"mips64\", \"f24\"): 488,\n    (\"mips64\", \"f24_lo\"): 488,\n    (\"mips64\", \"f25\"): 496,\n    (\"mips64\", \"f25_lo\"): 496,\n    (\"mips64\", \"f26\"): 504,\n    (\"mips64\", \"f26_lo\"): 504,\n    (\"mips64\", \"f27\"): 512,\n    (\"mips64\", \"f27_lo\"): 512,\n    (\"mips64\", \"f28\"): 520,\n    (\"mips64\", \"f28_lo\"): 520,\n    (\"mips64\", \"f29\"): 528,\n    (\"mips64\", \"f29_lo\"): 528,\n    (\"mips64\", \"f30\"): 536,\n    (\"mips64\", \"f30_lo\"): 536,\n    (\"mips64\", \"f31\"): 544,\n    
(\"mips64\", \"f31_lo\"): 544,\n    (\"mips64\", \"fir\"): 552,\n    (\"mips64\", \"fccr\"): 556,\n    (\"mips64\", \"fexr\"): 560,\n    (\"mips64\", \"fenr\"): 564,\n    (\"mips64\", \"fcsr\"): 568,\n    (\"mips64\", \"cp0_status\"): 572,\n    (\"mips64\", \"ulr\"): 576,\n    (\"mips64\", \"emnote\"): 584,\n    (\"mips64\", \"cond\"): 588,\n    (\"mips64\", \"cmstart\"): 592,\n    (\"mips64\", \"cmlen\"): 600,\n    (\"mips64\", \"nraddr\"): 608,\n    (\"mips64\", \"ip_at_syscall\"): 616,\n    (\"riscv64\", \"x0\"): 16,\n    (\"riscv64\", \"zero\"): 16,\n    (\"riscv64\", \"x1\"): 24,\n    (\"riscv64\", \"ra\"): 24,\n    (\"riscv64\", \"lr\"): 24,\n    (\"riscv64\", \"x2\"): 32,\n    (\"riscv64\", \"sp\"): 32,\n    (\"riscv64\", \"x3\"): 40,\n    (\"riscv64\", \"gp\"): 40,\n    (\"riscv64\", \"x4\"): 48,\n    (\"riscv64\", \"tp\"): 48,\n    (\"riscv64\", \"x5\"): 56,\n    (\"riscv64\", \"t0\"): 56,\n    (\"riscv64\", \"x6\"): 64,\n    (\"riscv64\", \"t1\"): 64,\n    (\"riscv64\", \"x7\"): 72,\n    (\"riscv64\", \"t2\"): 72,\n    (\"riscv64\", \"x9\"): 88,\n    (\"riscv64\", \"s1\"): 88,\n    (\"riscv64\", \"x10\"): 96,\n    (\"riscv64\", \"a0\"): 96,\n    (\"riscv64\", \"x11\"): 104,\n    (\"riscv64\", \"a1\"): 104,\n    (\"riscv64\", \"x12\"): 112,\n    (\"riscv64\", \"a2\"): 112,\n    (\"riscv64\", \"x13\"): 120,\n    (\"riscv64\", \"a3\"): 120,\n    (\"riscv64\", \"x14\"): 128,\n    (\"riscv64\", \"a4\"): 128,\n    (\"riscv64\", \"x15\"): 136,\n    (\"riscv64\", \"a5\"): 136,\n    (\"riscv64\", \"x16\"): 144,\n    (\"riscv64\", \"a6\"): 144,\n    (\"riscv64\", \"x17\"): 152,\n    (\"riscv64\", \"a7\"): 152,\n    (\"riscv64\", \"x18\"): 160,\n    (\"riscv64\", \"s2\"): 160,\n    (\"riscv64\", \"x19\"): 168,\n    (\"riscv64\", \"s3\"): 168,\n    (\"riscv64\", \"x20\"): 176,\n    (\"riscv64\", \"s4\"): 176,\n    (\"riscv64\", \"x21\"): 184,\n    (\"riscv64\", \"s5\"): 184,\n    (\"riscv64\", \"x22\"): 192,\n    (\"riscv64\", \"s6\"): 192,\n    (\"riscv64\", 
\"x23\"): 200,\n    (\"riscv64\", \"s7\"): 200,\n    (\"riscv64\", \"x24\"): 208,\n    (\"riscv64\", \"s8\"): 208,\n    (\"riscv64\", \"x25\"): 216,\n    (\"riscv64\", \"s9\"): 216,\n    (\"riscv64\", \"x26\"): 224,\n    (\"riscv64\", \"s10\"): 224,\n    (\"riscv64\", \"x27\"): 232,\n    (\"riscv64\", \"s11\"): 232,\n    (\"riscv64\", \"x28\"): 240,\n    (\"riscv64\", \"t3\"): 240,\n    (\"riscv64\", \"x29\"): 248,\n    (\"riscv64\", \"t4\"): 248,\n    (\"riscv64\", \"x30\"): 256,\n    (\"riscv64\", \"t5\"): 256,\n    (\"riscv64\", \"x31\"): 264,\n    (\"riscv64\", \"t6\"): 264,\n    (\"riscv64\", \"pc\"): 272,\n    (\"riscv64\", \"ip\"): 272,\n    (\"riscv64\", \"f0\"): 280,\n    (\"riscv64\", \"ft0\"): 280,\n    (\"riscv64\", \"f1\"): 288,\n    (\"riscv64\", \"ft1\"): 288,\n    (\"riscv64\", \"f2\"): 296,\n    (\"riscv64\", \"ft2\"): 296,\n    (\"riscv64\", \"f3\"): 304,\n    (\"riscv64\", \"ft3\"): 304,\n    (\"riscv64\", \"f4\"): 312,\n    (\"riscv64\", \"ft4\"): 312,\n    (\"riscv64\", \"f5\"): 320,\n    (\"riscv64\", \"ft5\"): 320,\n    (\"riscv64\", \"f6\"): 328,\n    (\"riscv64\", \"ft6\"): 328,\n    (\"riscv64\", \"f7\"): 336,\n    (\"riscv64\", \"ft7\"): 336,\n    (\"riscv64\", \"f9\"): 352,\n    (\"riscv64\", \"fs1\"): 352,\n    (\"riscv64\", \"f10\"): 360,\n    (\"riscv64\", \"fa0\"): 360,\n    (\"riscv64\", \"f11\"): 368,\n    (\"riscv64\", \"fa1\"): 368,\n    (\"riscv64\", \"f12\"): 376,\n    (\"riscv64\", \"fa2\"): 376,\n    (\"riscv64\", \"f13\"): 384,\n    (\"riscv64\", \"fa3\"): 384,\n    (\"riscv64\", \"f14\"): 392,\n    (\"riscv64\", \"fa4\"): 392,\n    (\"riscv64\", \"f15\"): 400,\n    (\"riscv64\", \"fa5\"): 400,\n    (\"riscv64\", \"f16\"): 408,\n    (\"riscv64\", \"fa6\"): 408,\n    (\"riscv64\", \"f17\"): 416,\n    (\"riscv64\", \"fa7\"): 416,\n    (\"riscv64\", \"f18\"): 424,\n    (\"riscv64\", \"fs2\"): 424,\n    (\"riscv64\", \"f19\"): 432,\n    (\"riscv64\", \"fs3\"): 432,\n    (\"riscv64\", \"f20\"): 440,\n    (\"riscv64\", \"fs4\"): 
440,\n    (\"riscv64\", \"f21\"): 448,\n    (\"riscv64\", \"fs5\"): 448,\n    (\"riscv64\", \"f22\"): 456,\n    (\"riscv64\", \"fs6\"): 456,\n    (\"riscv64\", \"f23\"): 464,\n    (\"riscv64\", \"fs7\"): 464,\n    (\"riscv64\", \"f24\"): 472,\n    (\"riscv64\", \"fs8\"): 472,\n    (\"riscv64\", \"f25\"): 480,\n    (\"riscv64\", \"fs9\"): 480,\n    (\"riscv64\", \"f26\"): 488,\n    (\"riscv64\", \"fs10\"): 488,\n    (\"riscv64\", \"f27\"): 496,\n    (\"riscv64\", \"fs11\"): 496,\n    (\"riscv64\", \"f28\"): 504,\n    (\"riscv64\", \"ft8\"): 504,\n    (\"riscv64\", \"f29\"): 512,\n    (\"riscv64\", \"ft9\"): 512,\n    (\"riscv64\", \"f30\"): 520,\n    (\"riscv64\", \"ft10\"): 520,\n    (\"riscv64\", \"f31\"): 528,\n    (\"riscv64\", \"ft11\"): 528,\n}\n"
  },
  {
    "path": "pyvex/arches.py",
    "content": "from ._register_info import REGISTER_OFFSETS\nfrom .enums import default_vex_archinfo, vex_endness_from_string\nfrom .types import Register\nfrom .vex_ffi import guest_offsets\n\n\nclass PyvexArch:\n    \"\"\"\n    An architecture definition for use with pyvex - usable version.\n    \"\"\"\n\n    def __init__(self, name: str, bits: int, memory_endness: str, instruction_endness: str = \"Iend_BE\"):\n        self.name = name\n        self.bits = bits\n        self.memory_endness = memory_endness\n        self.instruction_endness = instruction_endness\n        self.byte_width = 8\n        self.register_list: list[Register] = []\n        self.registers: dict[str, tuple[int, int]] = {}\n        self.vex_arch = {\n            \"X86\": \"VexArchX86\",\n            \"AMD64\": \"VexArchAMD64\",\n            \"ARM\": \"VexArchARM\",\n            \"ARM64\": \"VexArchARM64\",\n            \"PPC32\": \"VexArchPPC32\",\n            \"PPC64\": \"VexArchPPC64\",\n            \"S390X\": \"VexArchS390X\",\n            \"MIPS32\": \"VexArchMIPS32\",\n            \"MIPS64\": \"VexArchMIPS64\",\n            \"RISCV64\": \"VexArchRISCV64\",\n        }[name]\n        self.ip_offset = guest_offsets[\n            (\n                self.vex_name_small,\n                {\n                    \"X86\": \"eip\",\n                    \"AMD64\": \"rip\",\n                    \"ARM\": \"r15t\",\n                    \"ARM64\": \"pc\",\n                    \"PPC32\": \"cia\",\n                    \"PPC64\": \"cia\",\n                    \"S390X\": \"ia\",\n                    \"MIPS32\": \"pc\",\n                    \"MIPS64\": \"pc\",\n                    \"RISCV64\": \"pc\",\n                }[name],\n            )\n        ]\n        self.vex_archinfo = default_vex_archinfo()\n        if memory_endness == \"Iend_BE\":\n            self.vex_archinfo[\"endness\"] = vex_endness_from_string(\"VexEndnessBE\")\n\n    def __repr__(self):\n        return f\"<PyvexArch {self.name}>\"\n\n 
   @property\n    def vex_name_small(self):\n        return self.vex_arch[7:].lower()\n\n    def translate_register_name(self, offset, size=None):  # pylint: disable=unused-argument\n        for (arch, reg), offset2 in guest_offsets.items():\n            if arch == self.vex_name_small and offset2 == offset:\n                return reg\n        for (arch, reg), offset2 in REGISTER_OFFSETS.items():\n            if arch == self.vex_name_small and offset2 == offset:\n                return reg\n        return str(offset)\n\n    def get_register_offset(self, name: str) -> int:\n        arch_reg_tuple = (self.vex_name_small, name)\n        if arch_reg_tuple in guest_offsets:\n            return guest_offsets[arch_reg_tuple]\n        elif arch_reg_tuple in REGISTER_OFFSETS:\n            return REGISTER_OFFSETS[arch_reg_tuple]\n        else:\n            raise KeyError(f\"Unknown register {name} for architecture {self.name}\")\n\n\nARCH_X86 = PyvexArch(\"X86\", 32, \"Iend_LE\")\nARCH_AMD64 = PyvexArch(\"AMD64\", 64, \"Iend_LE\")\nARCH_ARM_LE = PyvexArch(\"ARM\", 32, \"Iend_LE\", instruction_endness=\"Iend_LE\")\nARCH_ARM_BE_LE = PyvexArch(\"ARM\", 32, \"Iend_BE\", instruction_endness=\"Iend_LE\")\nARCH_ARM_BE = PyvexArch(\"ARM\", 32, \"Iend_LE\")\nARCH_ARM64_LE = PyvexArch(\"ARM64\", 64, \"Iend_LE\", instruction_endness=\"Iend_LE\")\nARCH_ARM64_BE = PyvexArch(\"ARM64\", 64, \"Iend_BE\")\nARCH_PPC32 = PyvexArch(\"PPC32\", 32, \"Iend_BE\")\nARCH_PPC64_BE = PyvexArch(\"PPC64\", 64, \"Iend_BE\")\nARCH_PPC64_LE = PyvexArch(\"PPC64\", 64, \"Iend_LE\")\nARCH_S390X = PyvexArch(\"S390X\", 64, \"Iend_BE\")\nARCH_MIPS32_BE = PyvexArch(\"MIPS32\", 32, \"Iend_BE\")\nARCH_MIPS32_LE = PyvexArch(\"MIPS32\", 32, \"Iend_LE\")\nARCH_MIPS64_BE = PyvexArch(\"MIPS64\", 64, \"Iend_BE\")\nARCH_MIPS64_LE = PyvexArch(\"MIPS64\", 64, \"Iend_LE\")\nARCH_RISCV64_LE = PyvexArch(\"RISCV64\", 64, \"Iend_LE\", instruction_endness=\"Iend_LE\")\n"
  },
  {
    "path": "pyvex/block.py",
    "content": "import copy\nimport itertools\nimport logging\nfrom typing import Optional\n\nfrom . import expr, stmt\nfrom .const import U1, get_type_size\nfrom .const_val import ConstVal\nfrom .data_ref import DataRef\nfrom .enums import VEXObject\nfrom .errors import SkipStatementsError\nfrom .expr import Const, RdTmp\nfrom .native import pvc\nfrom .stmt import (\n    CAS,\n    LLSC,\n    Dirty,\n    Exit,\n    IMark,\n    IRExpr,\n    IRStmt,\n    LoadG,\n    WrTmp,\n    get_enum_from_int,\n    get_int_from_enum,\n)\nfrom .types import Arch\n\nlog = logging.getLogger(\"pyvex.block\")\n\n\nclass IRSB(VEXObject):\n    \"\"\"\n    The IRSB is the primary interface to pyvex. Constructing one of these will make a call into LibVEX to perform a\n    translation.\n\n    IRSB stands for *Intermediate Representation Super-Block*. An IRSB in VEX is a single-entry, multiple-exit code\n    block.\n\n    :ivar arch:             The architecture this block is lifted under. Must duck-type as :class:`archinfo.arch.Arch`\n    :ivar statements:       The statements in this block\n    :vartype statements:    list of :class:`IRStmt`\n    :ivar next:             The expression for the default exit target of this block\n    :vartype next:          :class:`IRExpr`\n    :ivar int offsIP:       The offset of the instruction pointer in the VEX guest state\n    :ivar int stmts_used:   The number of statements in this IRSB\n    :ivar str jumpkind:     The type of this block's default jump (call, boring, syscall, etc) as a VEX enum string\n    :ivar bool direct_next: Whether this block ends with a direct (not indirect) jump or branch\n    :ivar int size:         The size of this block in bytes\n    :ivar int addr:         The address of this basic block, i.e. 
the address in the first IMark\n    \"\"\"\n\n    __slots__ = [\n        \"addr\",\n        \"arch\",\n        \"statements\",\n        \"next\",\n        \"_tyenv\",\n        \"jumpkind\",\n        \"is_noop_block\",\n        \"_direct_next\",\n        \"_size\",\n        \"_instructions\",\n        \"_exit_statements\",\n        \"default_exit_target\",\n        \"_instruction_addresses\",\n        \"data_refs\",\n        \"const_vals\",\n    ]\n\n    # The following constants shall match the defs in pyvex.h\n    MAX_EXITS = 400\n    MAX_DATA_REFS = 2000\n    MAX_CONST_VALS = 1000\n\n    def __init__(\n        self,\n        data,\n        mem_addr,\n        arch: Arch,\n        max_inst=None,\n        max_bytes=None,\n        bytes_offset=0,\n        traceflags=0,\n        opt_level=1,\n        num_inst=None,\n        num_bytes=None,\n        strict_block_end=False,\n        skip_stmts=False,\n        collect_data_refs=False,\n        cross_insn_opt=True,\n    ):\n        \"\"\"\n        :param data:                The bytes to lift. Can be either a string of bytes or a cffi buffer object.\n                                    You may also pass None to initialize an empty IRSB.\n        :type data:                 str or bytes or cffi.FFI.CData or None\n        :param int mem_addr:        The address to lift the data at.\n        :param arch:                The architecture to lift the data as.\n        :param max_inst:            The maximum number of instructions to lift. (See note below)\n        :param max_bytes:           The maximum number of bytes to use.\n        :param num_inst:            Replaces max_inst if max_inst is None. If set to None as well, no instruction limit\n                                    is used.\n        :param num_bytes:           Replaces max_bytes if max_bytes is None. 
If set to None as well, no  byte limit is\n                                    used.\n        :param bytes_offset:        The offset into `data` to start lifting at. Note that for ARM THUMB mode, both\n                                    `mem_addr` and `bytes_offset` must be odd (typically `bytes_offset` is set to 1).\n        :param traceflags:          The libVEX traceflags, controlling VEX debug prints.\n        :param opt_level:           The level of optimization to apply to the IR, -1 through 2. -1 is the strictest\n                                    unoptimized level, 0 is unoptimized but will perform some lookahead/lookbehind\n                                    optimizations, 1 performs constant propogation, and 2 performs loop unrolling,\n                                    which honestly doesn't make much sense in the context of pyvex. The default is 1.\n        :param strict_block_end:    Should the LibVEX arm-thumb split block at some instructions, for example CB{N}Z.\n\n        .. note:: Explicitly specifying the number of instructions to lift (`max_inst`) may not always work\n                  exactly as expected. For example, on MIPS, it is meaningless to lift a branch or jump\n                  instruction without its delay slot. VEX attempts to Do The Right Thing by possibly decoding\n                  fewer instructions than requested. Specifically, this means that lifting a branch or jump\n                  on MIPS as a single instruction (`max_inst=1`) will result in an empty IRSB, and subsequent\n                  attempts to run this block will raise `SimIRSBError('Empty IRSB passed to SimIRSB.')`.\n\n        .. 
note:: If no instruction and byte limit is used, pyvex will continue lifting the block until the block\n                  ends properly or until it runs out of data to lift.\n        \"\"\"\n        if max_inst is None:\n            max_inst = num_inst\n        if max_bytes is None:\n            max_bytes = num_bytes\n        VEXObject.__init__(self)\n        self.addr = mem_addr\n        self.arch: Arch = arch\n\n        self.statements: list[IRStmt] = []\n        self.next: IRExpr = Const(U1(0))\n        self._tyenv: Optional[\"IRTypeEnv\"] = None\n        self.jumpkind: str = \"UNSET\"\n        self._direct_next: bool | None = None\n        self._size: int | None = None\n        self._instructions: int | None = None\n        self._exit_statements: tuple[tuple[int, int, IRStmt], ...] | None = None\n        self.is_noop_block: bool = False\n        self.default_exit_target = None\n        self.data_refs = ()\n        self.const_vals = ()\n        self._instruction_addresses: tuple[int, ...] = ()\n\n        if data is not None:\n            # This is the slower path (because we need to call _from_py() to copy the content in the returned IRSB to\n            # the current IRSB instance. You should always call `lift()` directly. 
This method is kept for compatibility\n            # concerns.\n            from pyvex.lifting import lift\n\n            irsb = lift(\n                data,\n                mem_addr,\n                arch,\n                max_bytes=max_bytes,\n                max_inst=max_inst,\n                bytes_offset=bytes_offset,\n                opt_level=opt_level,\n                traceflags=traceflags,\n                strict_block_end=strict_block_end,\n                skip_stmts=skip_stmts,\n                collect_data_refs=collect_data_refs,\n                cross_insn_opt=cross_insn_opt,\n            )\n            self._from_py(irsb)\n\n    @staticmethod\n    def empty_block(arch, addr, statements=None, nxt=None, tyenv=None, jumpkind=None, direct_next=None, size=None):\n        block = IRSB(None, addr, arch)\n        block._set_attributes(statements, nxt, tyenv, jumpkind, direct_next, size=size)\n        return block\n\n    @property\n    def tyenv(self) -> \"IRTypeEnv\":\n        if self._tyenv is None:\n            self._tyenv = IRTypeEnv(self.arch)\n        return self._tyenv\n\n    @tyenv.setter\n    def tyenv(self, v):\n        self._tyenv = v\n\n    @property\n    def has_statements(self) -> bool:\n        return self.statements is not None and bool(self.statements)\n\n    @property\n    def exit_statements(self) -> tuple[tuple[int, int, IRStmt], ...]:\n        if self._exit_statements is not None:\n            return self._exit_statements\n\n        # Delayed process\n        if not self.has_statements:\n            return ()\n\n        exit_statements = []\n\n        ins_addr = None\n        for idx, stmt_ in enumerate(self.statements):\n            if type(stmt_) is IMark:\n                ins_addr = stmt_.addr + stmt_.delta\n            elif type(stmt_) is Exit:\n                assert ins_addr is not None\n                exit_statements.append((ins_addr, idx, stmt_))\n\n        self._exit_statements = tuple(exit_statements)\n        return 
self._exit_statements\n\n    def copy(self) -> \"IRSB\":\n        return copy.deepcopy(self)\n\n    def extend(self, extendwith) -> None:\n        \"\"\"\n        Appends an irsb to the current irsb. The irsb that is appended is invalidated. The appended irsb's jumpkind and\n        default exit are used.\n        :param extendwith:     The IRSB to append to this IRSB\n        :vartype extendwith:   :class:`IRSB`\n        \"\"\"\n        if self.stmts_used == 0:\n            self._from_py(extendwith)\n            return\n\n        conversion_dict = {}\n        invalid_vals = (0xFFFFFFFF, -1)\n\n        new_size = self.size + extendwith.size\n        new_instructions = self.instructions + extendwith.instructions\n        new_direct_next = extendwith.direct_next\n\n        def convert_tmp(tmp):\n            \"\"\"\n            Converts a tmp from the appended-block into one in the appended-to-block. Creates a new tmp if it does not\n            already exist. Prevents collisions in tmp numbers between the two blocks.\n            :param tmp:       The tmp number to convert\n            \"\"\"\n            if tmp not in conversion_dict:\n                tmp_type = extendwith.tyenv.lookup(tmp)\n                conversion_dict[tmp] = self.tyenv.add(tmp_type)\n            return conversion_dict[tmp]\n\n        def convert_expr(expr_):\n            \"\"\"\n            Converts a VEX expression to use tmps in the appended-block instead of the appended-to-block. 
Used to\n            prevent collisions in tmp numbers between the two blocks.\n            :param tmp:       The VEX expression to convert\n            :vartype expr:    :class:`IRExpr`\n            \"\"\"\n            if type(expr_) is RdTmp:\n                return RdTmp.get_instance(convert_tmp(expr_.tmp))\n            return expr_\n\n        for stmt_ in extendwith.statements:\n            stmttype = type(stmt_)\n            if stmttype is WrTmp:\n                stmt_.tmp = convert_tmp(stmt_.tmp)\n            elif stmttype is LoadG:\n                stmt_.dst = convert_tmp(stmt_.dst)\n            elif stmttype is LLSC:\n                stmt_.result = convert_tmp(stmt_.result)\n            elif stmttype is Dirty:\n                if stmt_.tmp not in invalid_vals:\n                    stmt_.tmp = convert_tmp(stmt_.tmp)\n                for e in stmt_.args:\n                    convert_expr(e)\n            elif stmttype is CAS:\n                if stmt_.oldLo not in invalid_vals:\n                    stmt_.oldLo = convert_tmp(stmt_.oldLo)\n                if stmt_.oldHi not in invalid_vals:\n                    stmt_.oldHi = convert_tmp(stmt_.oldHi)\n            # Convert all expressions\n            to_replace = {}\n            for expr_ in stmt_.expressions:\n                replacement = convert_expr(expr_)\n                if replacement is not expr_:\n                    to_replace[expr_] = replacement\n            stmt_.replace_expression(to_replace)\n            # Add the converted statement to self.statements\n            self.statements.append(stmt_)\n        extendwith.next = convert_expr(extendwith.next)\n        self.next = extendwith.next\n        self.jumpkind = extendwith.jumpkind\n        self._size = new_size\n        self._instructions = new_instructions\n        self._direct_next = new_direct_next\n\n        # TODO: Change exit_statements, data_references, etc.\n\n    def invalidate_direct_next(self) -> None:\n        self._direct_next = 
None\n\n    def pp(self) -> None:\n        \"\"\"\n        Pretty-print the IRSB to stdout.\n        \"\"\"\n        print(self._pp_str())\n\n    def __repr__(self):\n        return f\"IRSB <0x{self.size:x} bytes, {self.instructions} ins., {str(self.arch)}> at 0x{self.addr:x}\"\n\n    def __str__(self):\n        return self._pp_str()\n\n    def __eq__(self, other):\n        return (\n            isinstance(other, IRSB)\n            and self.addr == other.addr\n            and self.arch.name == other.arch.name\n            and self.statements == other.statements\n            and self.next == other.next\n            and self.jumpkind == other.jumpkind\n        )\n\n    def __hash__(self):\n        return hash((IRSB, self.addr, self.arch.name, tuple(self.statements), self.next, self.jumpkind))\n\n    def typecheck(self) -> bool:\n        try:\n            # existence assertions\n            assert self.next is not None, \"Missing next expression\"\n            assert self.jumpkind is not None, \"Missing jumpkind\"\n\n            # Type assertions\n            assert isinstance(self.next, expr.IRExpr), \"Next expression is not an expression\"\n            assert type(self.jumpkind is str), \"Jumpkind is not a string\"\n            assert self.jumpkind.startswith(\"Ijk_\"), \"Jumpkind is not a jumpkind enum\"\n            assert self.tyenv.typecheck(), \"Type environment contains invalid types\"\n\n            # statement assertions\n            last_imark = None\n            for i, st in enumerate(self.statements):\n                assert isinstance(st, stmt.IRStmt), \"Statement %d is not an IRStmt\" % i\n                try:\n                    assert st.typecheck(self.tyenv), \"Statement %d failed to typecheck\" % i\n                except Exception:  # pylint: disable=bare-except\n                    assert False, \"Statement %d errored in typechecking\" % i\n\n                if type(st) is stmt.NoOp:\n                    continue\n                elif type(st) is 
stmt.IMark:\n                    if last_imark is not None:\n                        # pylint: disable=unsubscriptable-object\n                        assert last_imark[0] + last_imark[1] == st.addr, \"IMarks sizes overlap or have gaps\"\n                    last_imark = (st.addr, st.len)\n                else:\n                    assert last_imark is not None, \"Operation statement appears before IMark\"\n\n            assert last_imark is not None, \"No IMarks present in block\"\n        except AssertionError as e:\n            log.debug(e.args[0])\n            return False\n        return True\n\n    #\n    # alternate constructors\n    #\n\n    @staticmethod\n    def from_c(c_irsb, mem_addr, arch) -> \"IRSB\":\n        irsb = IRSB(None, mem_addr, arch)\n        irsb._from_c(c_irsb)\n        return irsb\n\n    @staticmethod\n    def from_py(tyenv, stmts, next_expr, jumpkind, mem_addr, arch) -> \"IRSB\":\n        irsb = IRSB(None, mem_addr, arch)\n\n        irsb.tyenv = tyenv\n        irsb.statements = stmts\n        irsb.next = next_expr\n        irsb.jumpkind = jumpkind\n        irsb._direct_next = irsb._is_defaultexit_direct_jump()\n\n        return irsb\n\n    #\n    # simple properties useful for analysis\n    #\n\n    @property\n    def stmts_used(self) -> int:\n        if self.statements is None:\n            return 0\n        return len(self.statements)\n\n    @property\n    def offsIP(self) -> int:\n        return self.arch.ip_offset\n\n    @property\n    def direct_next(self):\n        if self._direct_next is None:\n            self._direct_next = self._is_defaultexit_direct_jump()\n        return self._direct_next\n\n    @property\n    def expressions(self):\n        \"\"\"\n        Return an iterator of all expressions contained in the IRSB.\n        \"\"\"\n        for s in self.statements:\n            yield from s.expressions\n        yield self.next\n\n    @property\n    def instructions(self):\n        \"\"\"\n        The number of instructions 
in this block\n        \"\"\"\n        if self._instructions is None:\n            if self.statements is None:\n                self._instructions = 0\n            else:\n                self._instructions = len([s for s in self.statements if type(s) is stmt.IMark])\n        return self._instructions\n\n    @property\n    def instruction_addresses(self) -> tuple[int, ...]:\n        \"\"\"\n        Addresses of instructions in this block.\n        \"\"\"\n        if self._instruction_addresses is None:\n            if self.statements is None:\n                self._instruction_addresses = ()\n            else:\n                self._instruction_addresses = tuple(\n                    (s.addr + s.delta) for s in self.statements if type(s) is stmt.IMark\n                )\n        return self._instruction_addresses\n\n    @property\n    def size(self):\n        \"\"\"\n        The size of this block, in bytes\n        \"\"\"\n        if self._size is None:\n            self._size = sum(s.len for s in self.statements if type(s) is stmt.IMark)\n        return self._size\n\n    @property\n    def operations(self):\n        \"\"\"\n        A list of all operations done by the IRSB, as libVEX enum names\n        \"\"\"\n        ops = []\n        for e in self.expressions:\n            if hasattr(e, \"op\"):\n                ops.append(e.op)\n        return ops\n\n    @property\n    def all_constants(self):\n        \"\"\"\n        Returns all constants in the block (including incrementing of the program counter) as\n        :class:`pyvex.const.IRConst`.\n        \"\"\"\n        return sum((e.constants for e in self.expressions), [])\n\n    @property\n    def constants(self):\n        \"\"\"\n        The constants (excluding updates of the program counter) in the IRSB as :class:`pyvex.const.IRConst`.\n        \"\"\"\n        return sum((s.constants for s in self.statements if not (type(s) is stmt.Put and s.offset == self.offsIP)), [])\n\n    @property\n    def 
constant_jump_targets(self):\n        \"\"\"\n        A set of the static jump targets of the basic block.\n        \"\"\"\n        exits = set()\n\n        if self.exit_statements:\n            for _, _, stmt_ in self.exit_statements:\n                exits.add(stmt_.dst.value)\n\n        default_target = self.default_exit_target\n        if default_target is not None:\n            exits.add(default_target)\n\n        return exits\n\n    @property\n    def constant_jump_targets_and_jumpkinds(self):\n        \"\"\"\n        A dict of the static jump targets of the basic block to their jumpkind.\n        \"\"\"\n        exits = {}\n\n        if self.exit_statements:\n            for _, _, stmt_ in self.exit_statements:\n                exits[stmt_.dst.value] = stmt_.jumpkind\n\n        default_target = self.default_exit_target\n        if default_target is not None:\n            exits[default_target] = self.jumpkind\n\n        return exits\n\n    #\n    # private methods\n    #\n\n    def _pp_str(self) -> str:\n        \"\"\"\n        Return the pretty-printed IRSB.\n        \"\"\"\n        sa = []\n        sa.append(\"IRSB {\")\n        if self.statements is not None:\n            sa.append(\"   %s\" % self.tyenv)\n        sa.append(\"\")\n        if self.statements is not None:\n            for i, s in enumerate(self.statements):\n                if isinstance(s, stmt.Put):\n                    stmt_str = s.pp_str(\n                        reg_name=self.arch.translate_register_name(s.offset, s.data.result_size(self.tyenv) // 8)\n                    )\n                elif isinstance(s, stmt.WrTmp) and isinstance(s.data, expr.Get):\n                    stmt_str = s.pp_str(\n                        reg_name=self.arch.translate_register_name(s.data.offset, s.data.result_size(self.tyenv) // 8)\n                    )\n                elif isinstance(s, stmt.Exit):\n                    stmt_str = s.pp_str(reg_name=self.arch.translate_register_name(s.offsIP, 
self.arch.bits // 8))\n                else:\n                    stmt_str = s.pp_str()\n                sa.append(\"   %02d | %s\" % (i, stmt_str))\n        else:\n            sa.append(\"   Statements are omitted.\")\n        sa.append(f\"   NEXT: PUT({self.arch.translate_register_name(self.offsIP)}) = {self.next}; {self.jumpkind}\")\n        sa.append(\"}\")\n        return \"\\n\".join(sa)\n\n    def _is_defaultexit_direct_jump(self):\n        \"\"\"\n        Checks if the default of this IRSB a direct jump or not.\n        \"\"\"\n        if not (self.jumpkind == \"Ijk_InvalICache\" or self.jumpkind == \"Ijk_Boring\" or self.jumpkind == \"Ijk_Call\"):\n            return False\n\n        target = self.default_exit_target\n        return target is not None\n\n    #\n    # internal \"constructors\" to fill this block out with data from various sources\n    #\n\n    def _from_c(self, lift_r, skip_stmts=False):\n        c_irsb = lift_r.irsb\n        if not skip_stmts:\n            self.statements = [stmt.IRStmt._from_c(c_irsb.stmts[i]) for i in range(c_irsb.stmts_used)]\n            self.tyenv = IRTypeEnv._from_c(self.arch, c_irsb.tyenv)\n        else:\n            self.statements = None\n            self.tyenv = None\n\n        self.next = expr.IRExpr._from_c(c_irsb.next)\n        self.jumpkind = get_enum_from_int(c_irsb.jumpkind)\n        self._size = lift_r.size\n        self.is_noop_block = lift_r.is_noop_block == 1\n        self._instructions = lift_r.insts\n        self._instruction_addresses = tuple(itertools.islice(lift_r.inst_addrs, lift_r.insts))\n\n        # Conditional exits\n        exit_statements = []\n        if skip_stmts:\n            if lift_r.exit_count > self.MAX_EXITS:\n                # There are more exits than the default size of the exits array. 
We will need all statements\n                raise SkipStatementsError(\"exit_count exceeded MAX_EXITS (%d)\" % self.MAX_EXITS)\n            for i in range(lift_r.exit_count):\n                ex = lift_r.exits[i]\n                exit_stmt = stmt.IRStmt._from_c(ex.stmt)\n                exit_statements.append((ex.ins_addr, ex.stmt_idx, exit_stmt))\n\n            self._exit_statements = tuple(exit_statements)\n        else:\n            self._exit_statements = None  # It will be generated when self.exit_statements is called\n        # The default exit\n        if lift_r.is_default_exit_constant == 1:\n            self.default_exit_target = lift_r.default_exit\n        else:\n            self.default_exit_target = None\n\n        # Data references\n        self.data_refs = None\n        if lift_r.data_ref_count > 0:\n            if lift_r.data_ref_count > self.MAX_DATA_REFS:\n                raise SkipStatementsError(f\"data_ref_count exceeded MAX_DATA_REFS ({self.MAX_DATA_REFS})\")\n            self.data_refs = [DataRef.from_c(lift_r.data_refs[i]) for i in range(lift_r.data_ref_count)]\n\n        # Const values\n        self.const_vals = None\n        if lift_r.const_val_count > 0:\n            if lift_r.const_val_count > self.MAX_CONST_VALS:\n                raise SkipStatementsError(f\"const_val_count exceeded MAX_CONST_VALS ({self.MAX_CONST_VALS})\")\n            self.const_vals = [ConstVal.from_c(lift_r.const_vals[i]) for i in range(lift_r.const_val_count)]\n\n    def _set_attributes(\n        self,\n        statements=None,\n        nxt=None,\n        tyenv=None,\n        jumpkind=None,\n        direct_next=None,\n        size=None,\n        instructions=None,\n        instruction_addresses=None,\n        exit_statements=None,\n        default_exit_target=None,\n    ):\n        self.statements = statements if statements is not None else []\n        self.next = nxt\n        if tyenv is not None:\n            self.tyenv = tyenv\n        self.jumpkind = 
jumpkind\n        self._direct_next = direct_next\n        self._size = size\n        self._instructions = instructions\n        self._instruction_addresses = instruction_addresses\n        self._exit_statements = exit_statements\n        self.default_exit_target = default_exit_target\n\n    def _from_py(self, irsb):\n        self._set_attributes(\n            irsb.statements,\n            irsb.next,\n            irsb.tyenv,\n            irsb.jumpkind,\n            irsb.direct_next,\n            irsb.size,\n            instructions=irsb._instructions,\n            instruction_addresses=irsb._instruction_addresses,\n            exit_statements=irsb.exit_statements,\n            default_exit_target=irsb.default_exit_target,\n        )\n\n\nclass IRTypeEnv(VEXObject):\n    \"\"\"\n    An IR type environment.\n\n    :ivar types:        A list of the types of all the temporaries in this block as VEX enum strings.\n                        `types[3]` is the type of t3.\n    :vartype types:     list of str\n    \"\"\"\n\n    __slots__ = [\"types\", \"wordty\"]\n\n    def __init__(self, arch, types=None):\n        VEXObject.__init__(self)\n        self.types = [] if types is None else types\n        self.wordty = \"Ity_I%d\" % arch.bits\n\n    def __str__(self):\n        return \" \".join((\"t%d:%s\" % (i, t)) for i, t in enumerate(self.types))\n\n    def lookup(self, tmp: int) -> str:\n        \"\"\"\n        Return the type of temporary variable `tmp` as an enum string\n        \"\"\"\n        if tmp < 0 or tmp > self.types_used:\n            log.debug(\"Invalid temporary number %d\", tmp)\n            raise IndexError(tmp)\n        return self.types[tmp]\n\n    def sizeof(self, tmp):\n        return get_type_size(self.lookup(tmp))\n\n    def add(self, ty):\n        \"\"\"\n        Add a new tmp of type `ty` to the environment. 
Returns the number of the new tmp.\n        \"\"\"\n        self.types.append(ty)\n        return self.types_used - 1\n\n    @property\n    def types_used(self):\n        return len(self.types)\n\n    @staticmethod\n    def _from_c(arch, c_tyenv):\n        return IRTypeEnv(arch, [get_enum_from_int(c_tyenv.types[t]) for t in range(c_tyenv.types_used)])\n\n    @staticmethod\n    def _to_c(tyenv):\n        c_tyenv = pvc.emptyIRTypeEnv()\n        for ty in tyenv.types:\n            pvc.newIRTemp(c_tyenv, get_int_from_enum(ty))\n        return c_tyenv\n\n    def typecheck(self):\n        for ty in self.types:\n            try:\n                get_type_size(ty)\n            except ValueError:\n                return False\n        return True\n"
  },
  {
    "path": "pyvex/const.py",
    "content": "# pylint:disable=missing-class-docstring,raise-missing-from,not-callable\nimport re\nfrom abc import ABC\n\nfrom .enums import VEXObject, get_enum_from_int\nfrom .errors import PyVEXError\nfrom .native import ffi, pvc\n\n\n# IRConst hierarchy\nclass IRConst(VEXObject, ABC):\n    __slots__ = [\"_value\"]\n\n    type: str\n    size: int\n    tag: str\n    c_constructor = None\n    _value: int\n\n    def pp(self):\n        print(str(self))\n\n    @property\n    def value(self) -> int:\n        return self._value\n\n    @staticmethod\n    def _from_c(c_const):\n        if c_const[0] == ffi.NULL:\n            return None\n\n        tag = get_enum_from_int(c_const.tag)\n\n        try:\n            return tag_to_const_class(tag)._from_c(c_const)\n        except KeyError:\n            raise PyVEXError(\"Unknown/unsupported IRConstTag %s\\n\" % tag)\n\n    _translate = _from_c\n\n    @classmethod\n    def _to_c(cls, const):\n        # libvex throws an exception when constructing a U1 with a value other than 0 or 1\n        if const.tag == \"Ico_U1\" and const.value not in (0, 1):\n            raise PyVEXError(\"Invalid U1 value: %d\" % const.value)\n\n        try:\n            return cls.c_constructor(const.value)\n        except KeyError:\n            raise PyVEXError(\"Unknown/unsupported IRConstTag %s]n\" % const.tag)\n\n    def __eq__(self, other):\n        if not isinstance(other, type(self)):\n            return False\n        return self._value == other._value\n\n    def __hash__(self):\n        return hash((type(self), self._value))\n\n\nclass U1(IRConst):\n    __slots__: list[str] = []\n\n    type = \"Ity_I1\"\n    size = 1\n    tag = \"Ico_U1\"\n    op_format = \"1\"\n    c_constructor = pvc.IRConst_U1\n\n    def __init__(self, value):\n        self._value = value\n\n    def __str__(self):\n        return \"%d\" % self.value\n\n    @staticmethod\n    def _from_c(c_const):\n        return U1(c_const.Ico.U1)\n\n\nclass U8(IRConst):\n    __slots__: 
list[str] = []\n\n    type = \"Ity_I8\"\n    size = 8\n    tag = \"Ico_U8\"\n    op_format = \"8\"\n    c_constructor = pvc.IRConst_U8\n\n    def __init__(self, value):\n        self._value = value\n\n    def __str__(self):\n        return \"0x%02x\" % self.value\n\n    @staticmethod\n    def _from_c(c_const):\n        return _U8_POOL[c_const.Ico.U8]\n\n\n_U8_POOL = [U8(i) for i in range(256)]\n\n\nclass U16(IRConst):\n    __slots__: list[str] = []\n\n    type = \"Ity_I16\"\n    size = 16\n    tag = \"Ico_U16\"\n    op_format = \"16\"\n    c_constructor = pvc.IRConst_U16\n\n    def __init__(self, value):\n        self._value = value\n\n    def __str__(self):\n        return \"0x%04x\" % self.value\n\n    @staticmethod\n    def _from_c(c_const):\n        val = c_const.Ico.U16\n        if val < 1024:\n            return _U16_POOL[val]\n        if val >= 0xFC00:\n            return _U16_POOL[val - 0xFC00 + 1024]\n        return U16(val)\n\n\n_U16_POOL = [U16(i) for i in range(1024)] + [U16(i) for i in range(0xFC00, 0xFFFF + 1)]\n\n\nclass U32(IRConst):\n    __slots__: list[str] = []\n\n    type = \"Ity_I32\"\n    size = 32\n    tag = \"Ico_U32\"\n    op_format = \"32\"\n    c_constructor = pvc.IRConst_U32\n\n    def __init__(self, value: int):\n        self._value = value\n\n    def __str__(self):\n        return \"0x%08x\" % self.value\n\n    @staticmethod\n    def _from_c(c_const):\n        val = c_const.Ico.U32\n        if val < 1024:\n            return _U32_POOL[val]\n        if val >= 0xFFFFFC00:\n            return _U32_POOL[val - 0xFFFFFC00 + 1024]\n        return U32(val)\n\n\n_U32_POOL = [U32(i) for i in range(1024)] + [U32(i) for i in range(0xFFFFFC00, 0xFFFFFFFF + 1)]\n\n\nclass U64(IRConst):\n    __slots__: list[str] = []\n\n    type = \"Ity_I64\"\n    size = 64\n    tag = \"Ico_U64\"\n    op_format = \"64\"\n    c_constructor = pvc.IRConst_U64\n\n    def __init__(self, value):\n        self._value = value\n\n    def __str__(self):\n        return 
\"0x%016x\" % self.value\n\n    @staticmethod\n    def _from_c(c_const):\n        val = c_const.Ico.U64\n        if val < 1024:\n            return _U64_POOL[val]\n        if val >= 0xFFFFFFFFFFFFFC00:\n            return _U64_POOL[val - 0xFFFFFFFFFFFFFC00 + 1024]\n        return U64(val)\n\n\n_U64_POOL = [U64(i) for i in range(1024)] + [U64(i) for i in range(0xFFFFFFFFFFFFFC00, 0xFFFFFFFFFFFFFFFF + 1)]\n\n# Integer Type Imagination\nclass_cache = {1: U1, 8: U8, 16: U16, 32: U32, 64: U64}\n\n\ndef vex_int_class(size):\n    try:\n        return class_cache[size]\n    except KeyError:\n\n        class VexInt(IRConst):\n            type = \"Ity_I%d\" % size\n            tag = \"Ico_U%d\" % size\n            op_format = str(size)\n\n            def __init__(self, value):\n                IRConst.__init__(self)\n                self._value = value\n\n            def __str__(self):\n                return f\"(0x{self.value:x} :: {self.type})\"\n\n        VexInt.__name__ = \"U%d\" % size\n        class_cache[size] = VexInt\n        return VexInt\n\n\nclass F32(IRConst):\n    __slots__: list[str] = []\n\n    type = \"Ity_F32\"\n    tag = \"Ico_F32\"\n    op_format = \"F32\"\n    c_constructor = pvc.IRConst_F32\n    size = 32\n\n    def __init__(self, value):\n        self._value = value\n\n    def __str__(self):\n        return \"%f\" % self.value\n\n    @staticmethod\n    def _from_c(c_const):\n        return F32(c_const.Ico.F32)\n\n\nclass F32i(IRConst):\n    __slots__: list[str] = []\n\n    type = \"Ity_F32\"\n    tag = \"Ico_F32i\"\n    op_format = \"F32\"\n    c_constructor = pvc.IRConst_F32i\n    size = 32\n\n    def __init__(self, value):\n        self._value = value\n\n    def __str__(self):\n        return \"%f\" % self.value\n\n    @staticmethod\n    def _from_c(c_const):\n        return F32i(c_const.Ico.F32)\n\n\nclass F64(IRConst):\n    __slots__: list[str] = []\n\n    type = \"Ity_F64\"\n    tag = \"Ico_F64\"\n    op_format = \"F64\"\n    c_constructor = 
pvc.IRConst_F64\n    size = 64\n\n    def __init__(self, value):\n        self._value = value\n\n    def __str__(self):\n        return \"%f\" % self.value\n\n    @staticmethod\n    def _from_c(c_const):\n        return F64(c_const.Ico.F64)\n\n\nclass F64i(IRConst):\n    __slots__: list[str] = []\n\n    type = \"Ity_F64\"\n    tag = \"Ico_F64i\"\n    op_format = \"F64\"\n    c_constructor = pvc.IRConst_F64i\n    size = 64\n\n    def __init__(self, value):\n        self._value = value\n\n    def __str__(self):\n        return \"%f\" % self.value\n\n    @staticmethod\n    def _from_c(c_const):\n        return F64i(c_const.Ico.F64)\n\n\nclass V128(IRConst):\n    __slots__: list[str] = []\n\n    type = \"Ity_V128\"\n    tag = \"Ico_V128\"\n    op_format = \"V128\"\n    c_constructor = pvc.IRConst_V128\n    size = 128\n\n    def __init__(self, value):\n        self._value = value\n\n    def __str__(self):\n        return \"%x\" % self.value\n\n    # vex doesn't store a full 128 bit constant, instead it stores 1 bit per 8 bits of data\n    # and duplicates each bit 8 times\n    @staticmethod\n    def _from_c(c_const):\n        base_const = c_const.Ico.V128\n        real_const = 0\n        for i in range(16):\n            if (base_const >> i) & 1 == 1:\n                real_const |= 0xFF << (8 * i)\n        return V128(real_const)\n\n\nclass V256(IRConst):\n    __slots__: list[str] = []\n\n    type = \"Ity_V256\"\n    tag = \"Ico_V256\"\n    op_format = \"V256\"\n    c_constructor = pvc.IRConst_V256\n    size = 256\n\n    def __init__(self, value):\n        self._value = value\n\n    def __str__(self):\n        return \"%x\" % self.value\n\n    # see above\n    @staticmethod\n    def _from_c(c_const):\n        base_const = c_const.Ico.V256\n        real_const = 0\n        for i in range(32):\n            if (base_const >> i) & 1 == 1:\n                real_const |= 0xFF << (8 * i)\n        return V256(real_const)\n\n\npredefined_types = [U1, U8, U16, U32, U64, F32, F32i, 
F64, F64i, V128, V256]\npredefined_types_map = {c.type: c for c in predefined_types}\npredefined_classes_map = {c.tag: c for c in predefined_types}\n\n# precompiled regexes\nint_ty_re = re.compile(r\"Ity_I\\d+\")\nint_tag_re = re.compile(r\"Ico_U\\d+\")\ntag_size_re = re.compile(r\"Ico_[UFV](?P<size>\\d+)i?\")\n\n\ndef is_int_ty(ty):\n    m = int_ty_re.match(ty)\n    return m is not None\n\n\ndef is_int_tag(tag):\n    m = int_tag_re.match(tag)\n    return m is not None\n\n\ndef get_tag_size(tag):\n    m = tag_size_re.match(tag)\n    if m is None:\n        raise ValueError(\"Tag %s does not have size\" % tag)\n    return int(m.group(\"size\"))\n\n\ntype_str_re = re.compile(r\"Ity_[IFDV](?P<size>\\d+)\")\ntype_tag_str_re = re.compile(r\"[IFDV]?(?P<size>\\d+)[SU]?\")\n\n\ndef get_type_size(ty):\n    \"\"\"\n    Returns the size, in BITS, of a VEX type specifier\n    e.g., Ity_I16 -> 16\n\n    :param ty:\n    :return:\n    \"\"\"\n    m = type_str_re.match(ty)\n    if m is None:\n        raise ValueError(\"Type %s does not have size\" % ty)\n    return int(m.group(\"size\"))\n\n\ndef get_type_spec_size(ty):\n    \"\"\"\n    Get the width of a \"type specifier\"\n    like I16U\n    or F16\n    or just 16\n    (Yes, this really just takes the int out.  
If we must special-case, do it here.\n    :param tyspec:\n    :return:\n    \"\"\"\n    m = type_tag_str_re.match(ty)\n    if m is None:\n        raise ValueError(\"Type specifier %s does not have size\" % ty)\n    return int(m.group(\"size\"))\n\n\ndef ty_to_const_class(ty):\n    try:\n        return predefined_types_map[ty]\n    except KeyError:\n        if is_int_ty(ty):\n            size = get_type_size(ty)\n            return vex_int_class(size)\n        else:\n            raise ValueError(\"Type %s does not exist\" % ty)\n\n\ndef tag_to_const_class(tag):\n    try:\n        return predefined_classes_map[tag]\n    except KeyError:\n        if is_int_tag(tag):\n            size = get_tag_size(tag)\n            return vex_int_class(size)\n        else:\n            raise ValueError(\"Tag %s does not exist\" % tag)\n"
  },
  {
    "path": "pyvex/const_val.py",
    "content": "class ConstVal:\n    \"\"\"\n    A constant value object. Indicates a constant value assignment to a VEX tmp variable.\n\n    :ivar tmp:          The tmp variable being assigned to.\n    :ivar value:        The value of the tmp variable.\n    :ivar stmt_idx:     The IRSB statement index containing the data access\n    \"\"\"\n\n    __slots__ = (\n        \"tmp\",\n        \"value\",\n        \"stmt_idx\",\n    )\n\n    def __init__(self, tmp: int, value: int, stmt_idx: int):\n        self.tmp = tmp\n        self.value = value\n        self.stmt_idx = stmt_idx\n\n    def __repr__(self):\n        return f\"<ConstVal {self.tmp} = {self.value:#x} @ {self.stmt_idx}>\"\n\n    @classmethod\n    def from_c(cls, r):\n        return cls(r.tmp, r.value, r.stmt_idx)\n"
  },
  {
    "path": "pyvex/data_ref.py",
    "content": "def data_ref_type_str(dref_enum):\n    \"\"\"\n    Translate an ``enum DataRefTypes`` value into a string representation.\n    \"\"\"\n    if dref_enum == 0x9000:\n        return \"unknown\"\n    elif dref_enum == 0x9001:\n        return \"integer\"\n    elif dref_enum == 0x9002:\n        return \"fp\"\n    elif dref_enum == 0x9003:\n        return \"integer(store)\"\n    else:\n        return \"INVALID\"\n\n\nclass DataRef:\n    \"\"\"\n    A data reference object. Indicates a data access in an IRSB.\n\n    :ivar data_addr:    The address of the data being accessed\n    :ivar data_size:    The size of the data being accessed, in bytes\n    :ivar data_type:    The type of the data, a DataRefTypes enum.\n    :ivar stmt_idx:     The IRSB statement index containing the data access\n    :ivar ins_addr:     The address of the instruction performing the data access\n    \"\"\"\n\n    __slots__ = (\"data_addr\", \"data_size\", \"data_type\", \"stmt_idx\", \"ins_addr\")\n\n    def __init__(self, data_addr, data_size, data_type, stmt_idx, ins_addr):\n        self.data_addr = data_addr\n        self.data_size = data_size\n        self.data_type = data_type\n        self.stmt_idx = stmt_idx\n        self.ins_addr = ins_addr\n\n    @property\n    def data_type_str(self):\n        \"\"\"\n        The data ref type as a string, \"unknown\" \"integer\" \"fp\" or \"INVALID\"\n        \"\"\"\n        return data_ref_type_str(self.data_type)\n\n    def __repr__(self):\n        return \"<DataRef accessing %#x %s:%d at %#x:%d>\" % (\n            self.data_addr,\n            data_ref_type_str(self.data_type),\n            self.data_size,\n            self.ins_addr,\n            self.stmt_idx,\n        )\n\n    @classmethod\n    def from_c(cls, r):\n        return cls(r.data_addr, r.size, r.data_type, r.stmt_idx, r.ins_addr)\n"
  },
  {
    "path": "pyvex/enums.py",
    "content": "from typing import Any\n\nfrom .native import ffi, pvc\nfrom .utils import stable_hash\n\n\nclass VEXObject:\n    \"\"\"\n    The base class for Vex types.\n    \"\"\"\n\n    __slots__: list[str] = []\n\n    def __eq__(self, other):\n        if not isinstance(other, type(self)):\n            return False\n        # compare values in slots\n        for slot in self.__slots__:\n            if getattr(self, slot) != getattr(other, slot):\n                return False\n        return True\n\n    def __hash__(self):\n        values = [getattr(self, slot) for slot in self.__slots__]\n        for i, lst_val in enumerate(values):\n            if isinstance(lst_val, list):\n                values[i] = tuple(lst_val)\n        return stable_hash(tuple([type(self)] + values))\n\n\nclass IRCallee(VEXObject):\n    \"\"\"\n    Describes a helper function to call.\n    \"\"\"\n\n    __slots__ = [\"regparms\", \"name\", \"mcx_mask\"]\n\n    def __init__(self, regparms, name, mcx_mask):\n        VEXObject.__init__(self)\n        self.regparms = regparms\n        self.name = name\n        self.mcx_mask = mcx_mask\n\n    def __str__(self):\n        return str(self.name)\n\n    @staticmethod\n    def _from_c(c_callee):\n        return IRCallee(\n            c_callee.regparms,\n            ffi.string(c_callee.name).decode(),\n            # NO. #int(ffi.cast(\"unsigned long long\", c_callee.addr)),\n            c_callee.mcx_mask,\n        )\n\n    @staticmethod\n    def _to_c(callee):  # pylint: disable=unused-argument\n        raise TypeError(\n            \"This doesn't work! 
Please invent a way to get the correct address for the named function from pyvex_c.\"\n        )\n        # c_callee = pvc.mkIRCallee(callee.regparms,\n        #                          callee.name.encode(),\n        #                          ffi.cast(\"void *\", callee.addr))\n        # c_callee.mcx_mask = callee.mcx_mask\n        # return c_callee\n\n\nclass IRRegArray(VEXObject):\n    \"\"\"\n    A section of the guest state that we want te be able to index at run time, so as to be able to describe indexed or\n    rotating register files on the guest.\n\n    :ivar int base:     The offset into the state that this array starts\n    :ivar str elemTy:   The types of the elements in this array, as VEX enum strings\n    :ivar int nElems:   The number of elements in this array\n    \"\"\"\n\n    __slots__ = [\"base\", \"elemTy\", \"nElems\"]\n\n    def __init__(self, base, elemTy, nElems):\n        VEXObject.__init__(self)\n        self.base = base\n        self.elemTy = elemTy\n        self.nElems = nElems\n\n    def __str__(self):\n        return \"%s:%sx%d\" % (self.base, self.elemTy[4:], self.nElems)\n\n    @staticmethod\n    def _from_c(c_arr):\n        return IRRegArray(c_arr.base, ints_to_enums[c_arr.elemTy], c_arr.nElems)\n\n    @staticmethod\n    def _to_c(arr):\n        return pvc.mkIRRegArray(arr.base, get_int_from_enum(arr.elemTy), arr.nElems)\n\n\nints_to_enums: dict[int, str] = {}\nenums_to_ints: dict[str, int] = {}\nirop_enums_to_ints: dict[str, int] = {}\nwill_be_overwritten = [\"Ircr_GT\", \"Ircr_LT\"]\n\n\ndef get_enum_from_int(i):\n    return ints_to_enums[i]\n\n\ndef get_int_from_enum(e):\n    return enums_to_ints[e]\n\n\n_add_enum_counter = 0\n\n\ndef _add_enum(s, i=None):  # TODO get rid of this\n    global _add_enum_counter  # pylint: disable=global-statement\n    if i is None:\n        while _add_enum_counter in ints_to_enums:\n            _add_enum_counter += 1\n        i = _add_enum_counter\n        _add_enum_counter += 1  # Update for the 
next iteration\n    if i in ints_to_enums:\n        if ints_to_enums[i] not in will_be_overwritten:\n            raise ValueError(\"Enum with intkey %d already present\" % i)\n    enums_to_ints[s] = i\n    ints_to_enums[i] = s\n    if s.startswith(\"Iop_\"):\n        irop_enums_to_ints[s] = i\n\n\nfor attr in dir(pvc):\n    if attr[0] in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" and hasattr(pvc, attr) and isinstance(getattr(pvc, attr), int):\n        _add_enum(attr, getattr(pvc, attr))\n\n\ndef vex_endness_from_string(endness_str):\n    return getattr(pvc, endness_str)\n\n\ndef default_vex_archinfo() -> dict[str, Any]:\n    return {\n        \"hwcaps\": 0,\n        \"endness\": vex_endness_from_string(\"VexEndnessLE\"),\n        \"ppc_icache_line_szB\": 0,\n        \"ppc_dcbz_szB\": 0,\n        \"ppc_dcbzl_szB\": 0,\n        \"arm64_dMinLine_lg2_szB\": 0,\n        \"arm64_iMinLine_lg2_szB\": 0,\n        \"hwcache_info\": {\n            \"num_levels\": 0,\n            \"num_caches\": 0,\n            \"caches\": None,\n            \"icaches_maintain_coherence\": True,\n        },\n        \"x86_cr0\": 0xFFFFFFFF,\n    }\n"
  },
  {
    "path": "pyvex/errors.py",
    "content": "class PyVEXError(Exception):\n    pass\n\n\nclass SkipStatementsError(PyVEXError):\n    pass\n\n\n#\n# Exceptions and notifications that post-processors can raise\n#\n\n\nclass LiftingException(Exception):\n    pass\n\n\nclass NeedStatementsNotification(LiftingException):\n    \"\"\"\n    A post-processor may raise a NeedStatementsNotification if it needs to work with statements, but the current IRSB\n    is generated without any statement available (skip_stmts=True). The lifter will re-lift the current block with\n    skip_stmts=False upon catching a NeedStatementsNotification, and re-run the post-processors.\n\n    It's worth noting that if a post-processor always raises this notification for every basic block without statements,\n    it will essentially disable the skipping statement optimization, and it is bad for performance (especially for\n    CFGFast, which heavily relies on this optimization). Post-processor authors are encouraged to at least filter the\n    IRSBs based on available properties (jumpkind, next, etc.). If a post-processor must work with statements for the\n    majority of IRSBs, the author should implement it in PyVEX in C for the sake of a better performance.\n    \"\"\"\n\n    pass\n"
  },
  {
    "path": "pyvex/expr.py",
    "content": "from __future__ import annotations\n\nimport logging\nimport re\nfrom typing import TYPE_CHECKING\n\nfrom .const import U8, U16, U32, U64, IRConst, get_type_size\nfrom .enums import IRCallee, IRRegArray, VEXObject, get_enum_from_int, get_int_from_enum\nfrom .errors import PyVEXError\nfrom .native import ffi, pvc\n\nif TYPE_CHECKING:\n    from .block import IRTypeEnv\n\nlog = logging.getLogger(\"pyvex.expr\")\n\n\nclass IRExpr(VEXObject):\n    \"\"\"\n    IR expressions in VEX represent operations without side effects.\n    \"\"\"\n\n    __slots__ = []\n\n    tag: str | None = None\n    tag_int = 0  # set automatically at bottom of file\n\n    def pp(self):\n        print(str(self))\n\n    def __str__(self):\n        return self._pp_str()\n\n    def _pp_str(self) -> str:\n        raise NotImplementedError\n\n    @property\n    def child_expressions(self) -> list[IRExpr]:\n        \"\"\"\n        A list of all of the expressions that this expression ends up evaluating.\n        \"\"\"\n        expressions = []\n        for k in self.__slots__:\n            v = getattr(self, k)\n            if isinstance(v, IRExpr):\n                expressions.append(v)\n                expressions.extend(v.child_expressions)\n        return expressions\n\n    @property\n    def constants(self):\n        \"\"\"\n        A list of all of the constants that this expression ends up using.\n        \"\"\"\n        constants = []\n        for k in self.__slots__:\n            v = getattr(self, k)\n            if isinstance(v, IRExpr):\n                constants.extend(v.constants)\n            elif isinstance(v, IRConst):\n                constants.append(v)\n        return constants\n\n    def result_size(self, tyenv: IRTypeEnv):\n        return get_type_size(self.result_type(tyenv))\n\n    def result_type(self, tyenv: IRTypeEnv):\n        raise NotImplementedError()\n\n    def replace_expression(self, replacements):\n        \"\"\"\n        Replace child expressions 
in-place.\n\n        :param Dict[IRExpr, IRExpr] replacements:  A mapping from expression-to-find to expression-to-replace-with\n        :return:                    None\n        \"\"\"\n\n        for k in self.__slots__:\n            v = getattr(self, k)\n            if isinstance(v, IRExpr) and v in replacements:\n                setattr(self, k, replacements.get(v))\n            elif isinstance(v, list):\n                # Replace the instance in the list\n                for i, expr_ in enumerate(v):\n                    if isinstance(expr_, IRExpr) and expr_ in replacements:\n                        v[i] = replacements.get(expr_)\n            elif type(v) is tuple:\n                # Rebuild the tuple\n                _lst = []\n                replaced = False\n                for i, expr_ in enumerate(v):\n                    if isinstance(expr_, IRExpr) and expr_ in replacements:\n                        _lst.append(replacements.get(expr_))\n                        replaced = True\n                    else:\n                        _lst.append(expr_)\n                if replaced:\n                    setattr(self, k, tuple(_lst))\n            elif isinstance(v, IRExpr):\n                v.replace_expression(replacements)\n\n    @staticmethod\n    def _from_c(c_expr) -> IRExpr | None:\n        if c_expr == ffi.NULL or c_expr[0] == ffi.NULL:\n            return None\n\n        try:\n            return enum_to_expr_class(c_expr.tag)._from_c(c_expr)\n        except KeyError:\n            raise PyVEXError(\"Unknown/unsupported IRExprTag %s\\n\" % get_enum_from_int(c_expr.tag))\n\n    _translate = _from_c\n\n    @staticmethod\n    def _to_c(expr):\n        try:\n            return tag_to_expr_class(expr.tag)._to_c(expr)\n        except KeyError:\n            raise PyVEXError(\"Unknown/unsupported IRExprTag %s\\n\" % expr.tag)\n\n    def typecheck(self, tyenv):\n        return self.result_type(tyenv)\n\n\nclass Binder(IRExpr):\n    \"\"\"\n    Used only in pattern 
matching within Vex. Should not be seen outside of Vex.\n    \"\"\"\n\n    __slots__ = [\"binder\"]\n\n    tag = \"Iex_Binder\"\n\n    def __init__(self, binder):\n        self.binder = binder\n\n    def _pp_str(self):\n        return \"Binder\"\n\n    @staticmethod\n    def _from_c(c_expr):\n        return Binder(c_expr.iex.Binder.binder)\n\n    @staticmethod\n    def _to_c(expr):\n        return pvc.IRExpr_Binder(expr.binder)\n\n    def result_type(self, tyenv):\n        return \"Ity_INVALID\"\n\n\nclass VECRET(IRExpr):\n    tag = \"Iex_VECRET\"\n\n    __slots__ = []\n\n    def _pp_str(self):\n        return \"VECRET\"\n\n    @staticmethod\n    def _from_c(c_expr):\n        return VECRET()\n\n    @staticmethod\n    def _to_c(expr):\n        return pvc.IRExpr_VECRET()\n\n    def result_type(self, tyenv):\n        return \"Ity_INVALID\"\n\n\nclass GSPTR(IRExpr):\n    __slots__ = []\n\n    tag = \"Iex_GSPTR\"\n\n    def _pp_str(self):\n        return \"GSPTR\"\n\n    @staticmethod\n    def _from_c(c_expr):\n        return GSPTR()\n\n    @staticmethod\n    def _to_c(expr):\n        return pvc.IRExpr_GSPTR()\n\n    def result_type(self, tyenv):\n        return \"Ity_INVALID\"\n\n\nclass GetI(IRExpr):\n    \"\"\"\n    Read a guest register at a non-fixed offset in the guest state.\n    \"\"\"\n\n    __slots__ = [\"descr\", \"ix\", \"bias\"]\n\n    tag = \"Iex_GetI\"\n\n    def __init__(self, descr, ix, bias):\n        self.descr = descr\n        self.ix = ix\n        self.bias = bias\n\n    @property\n    def description(self):\n        return self.descr\n\n    @property\n    def index(self):\n        return self.ix\n\n    def _pp_str(self):\n        return f\"GetI({self.descr})[{self.ix},{self.bias}]\"\n\n    @staticmethod\n    def _from_c(c_expr):\n        descr = IRRegArray._from_c(c_expr.Iex.GetI.descr)\n        ix = IRExpr._from_c(c_expr.Iex.GetI.ix)\n        bias = c_expr.Iex.GetI.bias\n        return GetI(descr, ix, bias)\n\n    @staticmethod\n    def 
_to_c(expr):\n        return pvc.IRExpr_GetI(IRRegArray._to_c(expr.descr), IRExpr._to_c(expr.ix), expr.bias)\n\n    def result_type(self, tyenv):\n        return self.descr.elemTy\n\n\nclass RdTmp(IRExpr):\n    \"\"\"\n    Read the value held by a temporary.\n    \"\"\"\n\n    __slots__ = [\"_tmp\"]\n\n    tag = \"Iex_RdTmp\"\n\n    def __init__(self, tmp):\n        self._tmp = tmp\n\n    def _pp_str(self):\n        return \"t%d\" % self.tmp\n\n    @property\n    def tmp(self):\n        return self._tmp\n\n    @staticmethod\n    def _from_c(c_expr):\n        tmp = c_expr.Iex.RdTmp.tmp\n        return RdTmp.get_instance(tmp)\n\n    @staticmethod\n    def _to_c(expr):\n        return pvc.IRExpr_RdTmp(expr.tmp)\n\n    @staticmethod\n    def get_instance(tmp):\n        if tmp < 1024:\n            # for small tmp reads, they are cached and are only created once globally\n            return _RDTMP_POOL[tmp]\n        return RdTmp(tmp)\n\n    def replace_expression(self, replacements):\n        # RdTmp is one of the terminal IRExprs, which cannot be replaced.\n        pass\n\n    def result_type(self, tyenv):\n        return tyenv.lookup(self.tmp)\n\n    def __hash__(self):\n        return 133700 + self._tmp\n\n\n_RDTMP_POOL = list(RdTmp(i) for i in range(0, 1024))\n\n\nclass Get(IRExpr):\n    \"\"\"\n    Read a guest register, at a fixed offset in the guest state.\n    \"\"\"\n\n    __slots__ = [\"offset\", \"ty_int\"]\n\n    tag = \"Iex_Get\"\n\n    def __init__(self, offset: int, ty: str, ty_int: int | None = None):\n        self.offset = offset\n        if ty_int is None:\n            self.ty_int = get_int_from_enum(ty)\n        else:\n            self.ty_int = ty_int\n\n    @property\n    def ty(self):\n        return get_enum_from_int(self.ty_int)\n\n    @property\n    def type(self):\n        return get_enum_from_int(self.ty_int)\n\n    def _pp_str(self):\n        return f\"GET:{self.ty[4:]}(offset={self.offset})\"\n\n    def pp_str_with_name(self, reg_name: str):\n 
       \"\"\"pp_str_with_name is used to print the expression with the name of the\n        register instead of the offset\"\"\"\n        return f\"GET:{self.ty[4:]}({reg_name})\"\n\n    @staticmethod\n    def _from_c(c_expr):\n        return Get(c_expr.Iex.Get.offset, get_enum_from_int(c_expr.Iex.Get.ty))\n\n    @staticmethod\n    def _to_c(expr):\n        return pvc.IRExpr_Get(expr.offset, expr.ty_int)\n\n    def result_type(self, tyenv):\n        return self.ty\n\n    def __hash__(self):\n        return (self.offset << 8) | self.ty_int\n\n\nclass Qop(IRExpr):\n    \"\"\"\n    A quaternary operation (4 arguments).\n    \"\"\"\n\n    __slots__ = [\"op\", \"args\"]\n\n    tag = \"Iex_Qop\"\n\n    def __init__(self, op, args):\n        self.op = op\n        self.args = args\n\n    def _pp_str(self):\n        return \"{}({})\".format(self.op[4:], \",\".join(str(a) for a in self.args))\n\n    @property\n    def child_expressions(self):\n        expressions = sum((a.child_expressions for a in self.args), [])\n        expressions.extend(self.args)\n        return expressions\n\n    @staticmethod\n    def _from_c(c_expr):\n        return Qop(\n            get_enum_from_int(c_expr.Iex.Qop.details.op),\n            [\n                IRExpr._from_c(arg)\n                for arg in [\n                    c_expr.Iex.Qop.details.arg1,\n                    c_expr.Iex.Qop.details.arg2,\n                    c_expr.Iex.Qop.details.arg3,\n                    c_expr.Iex.Qop.details.arg4,\n                ]\n            ],\n        )\n\n    @staticmethod\n    def _to_c(expr):\n        return pvc.IRExpr_Qop(get_int_from_enum(expr.op), *[IRExpr._to_c(arg) for arg in expr.args])\n\n    def result_type(self, tyenv):\n        return get_op_retty(self.op)\n\n    def typecheck(self, tyenv):  # TODO change all this to use PyvexTypeErrorException\n        resty, (arg1ty, arg2ty, arg3ty, arg4ty) = op_arg_types(self.op)\n        arg1ty_real = self.args[0].typecheck(tyenv)\n        arg2ty_real 
= self.args[1].typecheck(tyenv)\n        arg3ty_real = self.args[2].typecheck(tyenv)\n        arg4ty_real = self.args[3].typecheck(tyenv)\n        if arg1ty_real is None or arg2ty_real is None or arg3ty_real is None or arg4ty_real is None:\n            return None\n\n        if arg1ty_real != arg1ty:\n            log.debug(\"First arg of %s must be %s\", self.op, arg1ty)\n            return None\n        if arg2ty_real != arg2ty:\n            log.debug(\"Second arg of %s must be %s\", self.op, arg2ty)\n            return None\n        if arg3ty_real != arg3ty:\n            log.debug(\"Third arg of %s must be %s\", self.op, arg3ty)\n            return None\n        if arg4ty_real != arg4ty:\n            log.debug(\"Fourth arg of %s must be %s\", self.op, arg4ty)\n            return None\n\n        return resty\n\n\nclass Triop(IRExpr):\n    \"\"\"\n    A ternary operation (3 arguments)\n    \"\"\"\n\n    __slots__ = [\"op\", \"args\"]\n\n    tag = \"Iex_Triop\"\n\n    def __init__(self, op, args):\n        self.op = op\n        self.args = args\n\n    def _pp_str(self):\n        return \"{}({})\".format(self.op[4:], \",\".join(str(a) for a in self.args))\n\n    @property\n    def child_expressions(self):\n        expressions = sum((a.child_expressions for a in self.args), [])\n        expressions.extend(self.args)\n        return expressions\n\n    @staticmethod\n    def _from_c(c_expr):\n        return Triop(\n            get_enum_from_int(c_expr.Iex.Triop.details.op),\n            [\n                IRExpr._from_c(arg)\n                for arg in [c_expr.Iex.Triop.details.arg1, c_expr.Iex.Triop.details.arg2, c_expr.Iex.Triop.details.arg3]\n            ],\n        )\n\n    @staticmethod\n    def _to_c(expr):\n        return pvc.IRExpr_Triop(get_int_from_enum(expr.op), *[IRExpr._to_c(arg) for arg in expr.args])\n\n    def result_type(self, tyenv):\n        return get_op_retty(self.op)\n\n    def typecheck(self, tyenv):\n        resty, (arg1ty, arg2ty, arg3ty) = 
op_arg_types(self.op)\n        arg1ty_real = self.args[0].typecheck(tyenv)\n        arg2ty_real = self.args[1].typecheck(tyenv)\n        arg3ty_real = self.args[2].typecheck(tyenv)\n        if arg1ty_real is None or arg2ty_real is None or arg3ty_real is None:\n            return None\n\n        if arg1ty_real != arg1ty:\n            log.debug(\"First arg of %s must be %s\", self.op, arg1ty)\n            return None\n        if arg2ty_real != arg2ty:\n            log.debug(\"Second arg of %s must be %s\", self.op, arg2ty)\n            return None\n        if arg3ty_real != arg3ty:\n            log.debug(\"Third arg of %s must be %s\", self.op, arg3ty)\n            return None\n\n        return resty\n\n\nclass Binop(IRExpr):\n    \"\"\"\n    A binary operation (2 arguments).\n    \"\"\"\n\n    __slots__ = [\"_op\", \"op_int\", \"args\"]\n\n    tag = \"Iex_Binop\"\n\n    def __init__(self, op, args, op_int=None):\n        self.op_int = op_int\n        self.args = args\n        self._op = op if op is not None else None\n\n    def _pp_str(self):\n        return \"{}({})\".format(self.op[4:], \",\".join(str(a) for a in self.args))\n\n    @property\n    def op(self):\n        if self._op is None:\n            self._op = get_enum_from_int(self.op_int)\n        return self._op\n\n    @property\n    def child_expressions(self):\n        expressions = sum((a.child_expressions for a in self.args), [])\n        expressions.extend(self.args)\n        return expressions\n\n    @staticmethod\n    def _from_c(c_expr):\n        return Binop(\n            None,\n            [IRExpr._from_c(arg) for arg in [c_expr.Iex.Binop.arg1, c_expr.Iex.Binop.arg2]],\n            op_int=c_expr.Iex.Binop.op,\n        )\n\n    @staticmethod\n    def _to_c(expr):\n        return pvc.IRExpr_Binop(get_int_from_enum(expr.op), *[IRExpr._to_c(arg) for arg in expr.args])\n\n    def result_type(self, tyenv):\n        return get_op_retty(self.op)\n\n    def typecheck(self, tyenv):\n        arg1ty_real = 
self.args[0].typecheck(tyenv)\n        arg2ty_real = self.args[1].typecheck(tyenv)\n\n        resty, (arg1ty, arg2ty) = op_arg_types(self.op)\n        if arg1ty_real is None or arg2ty_real is None:\n            return None\n\n        if arg1ty_real != arg1ty:\n            log.debug(\"First arg of %s must be %s\", self.op, arg1ty)\n            return None\n        if arg2ty_real != arg2ty:\n            log.debug(\"Second arg of %s must be %s\", self.op, arg2ty)\n            return None\n\n        return resty\n\n\nclass Unop(IRExpr):\n    \"\"\"\n    A unary operation (1 argument).\n    \"\"\"\n\n    __slots__ = [\"op\", \"args\"]\n\n    tag = \"Iex_Unop\"\n\n    def __init__(self, op: str, args: list[IRExpr]):\n        self.op = op\n        self.args = args\n\n    def _pp_str(self):\n        return \"{}({})\".format(self.op[4:], \",\".join(str(a) for a in self.args))\n\n    @property\n    def child_expressions(self):\n        expressions = sum((a.child_expressions for a in self.args), [])\n        expressions.extend(self.args)\n        return expressions\n\n    @staticmethod\n    def _from_c(c_expr):\n        return Unop(get_enum_from_int(c_expr.Iex.Unop.op), [IRExpr._from_c(c_expr.Iex.Unop.arg)])\n\n    @staticmethod\n    def _to_c(expr):\n        return pvc.IRExpr_Unop(get_int_from_enum(expr.op), IRExpr._to_c(expr.args[0]))\n\n    def result_type(self, tyenv):\n        return get_op_retty(self.op)\n\n    def typecheck(self, tyenv):\n        resty, (arg1ty,) = op_arg_types(self.op)\n        arg1ty_real = self.args[0].typecheck(tyenv)\n        if arg1ty_real is None:\n            return None\n\n        if arg1ty_real != arg1ty:\n            log.debug(\"First arg of %s must be %s\", self.op, arg1ty)\n            return None\n\n        return resty\n\n\nclass Load(IRExpr):\n    \"\"\"\n    A load from memory.\n    \"\"\"\n\n    __slots__ = [\"end\", \"ty\", \"addr\"]\n\n    tag = \"Iex_Load\"\n\n    def __init__(self, end, ty, addr):\n        self.end = end\n        
self.ty = ty\n        self.addr = addr\n\n    @property\n    def endness(self):\n        return self.end\n\n    @property\n    def type(self):\n        return self.ty\n\n    def _pp_str(self):\n        return f\"LD{self.end[-2:].lower()}:{self.ty[4:]}({self.addr})\"\n\n    @staticmethod\n    def _from_c(c_expr):\n        return Load(\n            get_enum_from_int(c_expr.Iex.Load.end),\n            get_enum_from_int(c_expr.Iex.Load.ty),\n            IRExpr._from_c(c_expr.Iex.Load.addr),\n        )\n\n    @staticmethod\n    def _to_c(expr):\n        return pvc.IRExpr_Load(get_int_from_enum(expr.end), get_int_from_enum(expr.ty), IRExpr._to_c(expr.addr))\n\n    def result_type(self, tyenv):\n        return self.ty\n\n    def typecheck(self, tyenv):\n        addrty = self.addr.typecheck(tyenv)\n        if addrty is None:\n            return None\n        if addrty != tyenv.wordty:\n            log.debug(\"Address must be word-sized\")\n            return None\n        return self.ty\n\n\nclass Const(IRExpr):\n    \"\"\"\n    A constant expression.\n    \"\"\"\n\n    __slots__ = [\"_con\"]\n\n    tag = \"Iex_Const\"\n\n    def __init__(self, con: IRConst):\n        self._con = con\n\n    def _pp_str(self):\n        return str(self.con)\n\n    @property\n    def con(self) -> IRConst:\n        return self._con\n\n    @staticmethod\n    def _from_c(c_expr):\n        con = IRConst._from_c(c_expr.Iex.Const.con)\n        return Const.get_instance(con)\n\n    @staticmethod\n    def _to_c(expr):\n        return pvc.IRExpr_Const(IRConst._to_c(expr.con))\n\n    @staticmethod\n    def get_instance(con):\n        if con.value < 1024 and con.__class__ in _CONST_POOL:\n            return _CONST_POOL[con.__class__][con.value]\n        return Const(con)\n\n    def result_type(self, tyenv):\n        return self.con.type\n\n\n_CONST_POOL = {\n    U8: [Const(U8(i)) for i in range(0, 1024)],\n    U16: [Const(U16(i)) for i in range(0, 1024)],\n    U32: [Const(U32(i)) for i in range(0, 
1024)],\n    U64: [Const(U64(i)) for i in range(0, 1024)],\n}\n\n\nclass ITE(IRExpr):\n    \"\"\"\n    An if-then-else expression.\n    \"\"\"\n\n    __slots__ = [\"cond\", \"iffalse\", \"iftrue\"]\n\n    tag = \"Iex_ITE\"\n\n    def __init__(self, cond, iffalse, iftrue):\n        self.cond = cond\n        self.iffalse = iffalse\n        self.iftrue = iftrue\n\n    def _pp_str(self):\n        return f\"ITE({self.cond},{self.iftrue},{self.iffalse})\"\n\n    @staticmethod\n    def _from_c(c_expr):\n        return ITE(\n            IRExpr._from_c(c_expr.Iex.ITE.cond),\n            IRExpr._from_c(c_expr.Iex.ITE.iffalse),\n            IRExpr._from_c(c_expr.Iex.ITE.iftrue),\n        )\n\n    @staticmethod\n    def _to_c(expr):\n        return pvc.IRExpr_ITE(IRExpr._to_c(expr.cond), IRExpr._to_c(expr.iftrue), IRExpr._to_c(expr.iffalse))\n\n    def result_type(self, tyenv):\n        return self.iftrue.result_type(tyenv)\n\n    def typecheck(self, tyenv):\n        condty = self.cond.typecheck(tyenv)\n        falsety = self.iffalse.typecheck(tyenv)\n        truety = self.iftrue.typecheck(tyenv)\n\n        if condty is None or falsety is None or truety is None:\n            return None\n\n        if condty != \"Ity_I1\":\n            log.debug(\"guard must be Ity_I1\")\n            return None\n\n        if falsety != truety:\n            log.debug(\"false condition must be same type as true condition\")\n            return None\n\n        return falsety\n\n\nclass CCall(IRExpr):\n    \"\"\"\n    A call to a pure (no side-effects) helper C function.\n    \"\"\"\n\n    __slots__ = [\"retty\", \"cee\", \"args\"]\n\n    tag = \"Iex_CCall\"\n\n    def __init__(self, retty, cee, args):\n        self.retty = retty\n        self.cee = cee\n        self.args = tuple(args)\n\n    @property\n    def ret_type(self):\n        return self.retty\n\n    @property\n    def callee(self):\n        return self.cee\n\n    def _pp_str(self):\n        return \"{}({}):{}\".format(self.cee, 
\",\".join(str(a) for a in self.args), self.retty)\n\n    @property\n    def child_expressions(self):\n        expressions = sum((a.child_expressions for a in self.args), [])\n        expressions.extend(self.args)\n        return expressions\n\n    @staticmethod\n    def _from_c(c_expr):\n        i = 0\n        args = []\n        while True:\n            arg = c_expr.Iex.CCall.args[i]\n            if arg == ffi.NULL:\n                break\n            args.append(IRExpr._from_c(arg))\n            i += 1\n\n        return CCall(get_enum_from_int(c_expr.Iex.CCall.retty), IRCallee._from_c(c_expr.Iex.CCall.cee), tuple(args))\n\n    @staticmethod\n    def _to_c(expr):\n        args = [IRExpr._to_c(arg) for arg in expr.args]\n        mkIRExprVec = getattr(pvc, \"mkIRExprVec_%d\" % len(args))\n        return pvc.IRExpr_CCall(IRCallee._to_c(expr.cee), get_int_from_enum(expr.retty), mkIRExprVec(*args))\n\n    def result_type(self, tyenv):\n        return self.retty\n\n\ndef get_op_retty(op):\n    return op_arg_types(op)[0]\n\n\nop_signatures: dict[str, tuple[str, tuple[str, ...]]] = {}\n\n\ndef _request_op_type_from_cache(op):\n    return op_signatures[op]\n\n\ndef _request_op_type_from_libvex(op):\n    Ity_INVALID = 0x1100  # as defined in enum IRType in VEX\n\n    res_ty = ffi.new(\"IRType *\")\n    arg_tys = [ffi.new(\"IRType *\") for _ in range(4)]\n    # initialize all IRTypes to Ity_INVALID\n    for arg in arg_tys:\n        arg[0] = Ity_INVALID\n    pvc.typeOfPrimop(get_int_from_enum(op), res_ty, *arg_tys)\n    arg_ty_vals = [a[0] for a in arg_tys]\n\n    try:\n        numargs = arg_ty_vals.index(Ity_INVALID)\n    except ValueError:\n        numargs = 4\n    args_tys_list = [get_enum_from_int(arg_ty_vals[i]) for i in range(numargs)]\n\n    op_ty_sig = (get_enum_from_int(res_ty[0]), tuple(args_tys_list))\n    op_signatures[op] = op_ty_sig\n    return op_ty_sig\n\n\nclass PyvexOpMatchException(Exception):\n    pass\n\n\nclass PyvexTypeErrorException(Exception):\n    
pass\n\n\ndef int_type_for_size(size):\n    return \"Ity_I%d\" % size\n\n\n# precompiled regexes\nunop_signature_re = re.compile(r\"Iop_(Not|Ctz|Clz)(?P<size>\\d+)$\")\nbinop_signature_re = re.compile(r\"Iop_(Add|Sub|Mul|Xor|Or|And|Div[SU]|Mod)(?P<size>\\d+)$\")\nshift_signature_re = re.compile(r\"Iop_(Shl|Shr|Sar)(?P<size>\\d+)$\")\ncmp_signature_re_1 = re.compile(r\"Iop_Cmp(EQ|NE)(?P<size>\\d+)$\")\ncmp_signature_re_2 = re.compile(r\"Iop_Cmp(GT|GE|LT|LE)(?P<size>\\d+)[SU]$\")\nmull_signature_re = re.compile(r\"Iop_Mull[SU](?P<size>\\d+)$\")\nhalf_signature_re = re.compile(r\"Iop_DivMod[SU](?P<fullsize>\\d+)to(?P<halfsize>\\d+)$\")\ncast_signature_re = re.compile(r\"Iop_(?P<srcsize>\\d+)(U|S|HI|HL)?to(?P<dstsize>\\d+)\")\n\n\ndef unop_signature(op):\n    m = unop_signature_re.match(op)\n    if m is None:\n        raise PyvexOpMatchException()\n    size = int(m.group(\"size\"))\n    size_type = int_type_for_size(size)\n    return size_type, (size_type,)\n\n\ndef binop_signature(op):\n    m = binop_signature_re.match(op)\n    if m is None:\n        raise PyvexOpMatchException()\n    size = int(m.group(\"size\"))\n    size_type = int_type_for_size(size)\n    return (size_type, (size_type, size_type))\n\n\ndef shift_signature(op):\n    m = shift_signature_re.match(op)\n    if m is None:\n        raise PyvexOpMatchException()\n    size = int(m.group(\"size\"))\n    if size > 255:\n        raise PyvexTypeErrorException(\"Cannot apply shift operation to %d size int because shift index is 8-bit\" % size)\n    size_type = int_type_for_size(size)\n    return (size_type, (size_type, int_type_for_size(8)))\n\n\ndef cmp_signature(op):\n    m = cmp_signature_re_1.match(op)\n    m2 = cmp_signature_re_2.match(op)\n    if (m is None) == (m2 is None):\n        raise PyvexOpMatchException()\n    mfound = m if m is not None else m2\n    assert mfound is not None\n    size = int(mfound.group(\"size\"))\n    size_type = int_type_for_size(size)\n    return (int_type_for_size(1), 
(size_type, size_type))\n\n\ndef mull_signature(op):\n    m = mull_signature_re.match(op)\n    if m is None:\n        raise PyvexOpMatchException()\n    size = int(m.group(\"size\"))\n    size_type = int_type_for_size(size)\n    doubled_size_type = int_type_for_size(2 * size)\n    return (doubled_size_type, (size_type, size_type))\n\n\ndef half_signature(op):\n    m = half_signature_re.match(op)\n    if m is None:\n        raise PyvexOpMatchException()\n    fullsize = int(m.group(\"fullsize\"))\n    halfsize = int(m.group(\"halfsize\"))\n    if halfsize * 2 != fullsize:\n        raise PyvexTypeErrorException(\"Invalid Instruction %s: Type 1 must be twice the size of type 2\" % op)\n    fullsize_type = int_type_for_size(fullsize)\n    halfsize_type = int_type_for_size(halfsize)\n    return (fullsize_type, (fullsize_type, halfsize_type))\n\n\ndef cast_signature(op):\n    m = cast_signature_re.match(op)\n    if m is None:\n        raise PyvexOpMatchException()\n    src_type = int_type_for_size(int(m.group(\"srcsize\")))\n    dst_type = int_type_for_size(int(m.group(\"dstsize\")))\n    return (dst_type, (src_type,))\n\n\npolymorphic_op_processors = [\n    unop_signature,\n    binop_signature,\n    shift_signature,\n    cmp_signature,\n    mull_signature,\n    half_signature,\n    cast_signature,\n]\n\n\ndef _request_polymorphic_op_type(op):\n    for polymorphic_signature in polymorphic_op_processors:\n        try:\n            op_ty_sig = polymorphic_signature(op)\n            break\n        except PyvexOpMatchException:\n            continue\n    else:\n        raise PyvexOpMatchException(\"Op %s not recognized\" % op)\n    return op_ty_sig\n\n\n_request_funcs = [_request_op_type_from_cache, _request_op_type_from_libvex, _request_polymorphic_op_type]\n\n\ndef op_arg_types(op):\n    for _request_func in _request_funcs:\n        try:\n            return _request_func(op)\n        except KeyError:\n            continue\n    raise ValueError(\"Cannot find type of op %s\" 
% op)\n\n\n_globals = globals().copy()\n#\n# Mapping from tag strings/enums to IRExpr classes\n#\ntag_to_expr_mapping = {}\nenum_to_expr_mapping = {}\ntag_count = 0\ncls = None\nfor cls in _globals.values():\n    if type(cls) is type and issubclass(cls, IRExpr) and cls is not IRExpr:\n        tag_to_expr_mapping[cls.tag] = cls\n        enum_to_expr_mapping[get_int_from_enum(cls.tag)] = cls\n        cls.tag_int = tag_count\n        tag_count += 1\ndel cls\n\n\ndef tag_to_expr_class(tag):\n    \"\"\"\n    Convert a tag string to the corresponding IRExpr class type.\n\n    :param str tag: The tag string.\n    :return:        A class.\n    :rtype:         type\n    \"\"\"\n\n    try:\n        return tag_to_expr_mapping[tag]\n    except KeyError:\n        raise KeyError(\"Cannot find expression class for type %s.\" % tag)\n\n\ndef enum_to_expr_class(tag_enum):\n    \"\"\"\n    Convert a tag enum to the corresponding IRExpr class.\n\n    :param int tag_enum: The tag enum.\n    :return:             A class.\n    :rtype:              type\n    \"\"\"\n\n    try:\n        return enum_to_expr_mapping[tag_enum]\n    except KeyError:\n        raise KeyError(\"Cannot find expression class for type %s.\" % get_enum_from_int(tag_enum))\n"
  },
  {
    "path": "pyvex/lifting/__init__.py",
    "content": "from .gym import AARCH64Spotter, AMD64Spotter, ARMSpotter, X86Spotter\nfrom .libvex import LIBVEX_SUPPORTED_ARCHES, LibVEXLifter\nfrom .lift_function import lift, lifters, register\nfrom .lifter import Lifter\nfrom .post_processor import Postprocessor\nfrom .zerodivision import ZeroDivisionPostProcessor\n\nfor arch in LIBVEX_SUPPORTED_ARCHES:\n    register(LibVEXLifter, arch)\nregister(AARCH64Spotter, \"AARCH64\")\nregister(ARMSpotter, \"ARM\")\nregister(ARMSpotter, \"ARMEL\")\nregister(ARMSpotter, \"ARMHF\")\nregister(ARMSpotter, \"ARMCortexM\")\nregister(AMD64Spotter, \"AMD64\")\nregister(X86Spotter, \"X86\")\n\n__all__ = [\"Lifter\", \"Postprocessor\", \"lift\", \"register\", \"lifters\", \"ZeroDivisionPostProcessor\"]\n"
  },
  {
    "path": "pyvex/lifting/gym/README.md",
    "content": "# The Gym\n\nThis is where we're putting non-libvex lifters that we feel should be included with the pyvex distribution.\n\nThese will probably be mostly \"spotters\", which correct for gaps in libvex's instruction support.\n\n\n"
  },
  {
    "path": "pyvex/lifting/gym/__init__.py",
    "content": "from .aarch64_spotter import AARCH64Spotter\nfrom .arm_spotter import ARMSpotter\nfrom .x86_spotter import AMD64Spotter, X86Spotter\n\n__all__ = (\"ARMSpotter\", \"AARCH64Spotter\", \"X86Spotter\", \"AMD64Spotter\")\n"
  },
  {
    "path": "pyvex/lifting/gym/aarch64_spotter.py",
    "content": "import logging\n\nfrom pyvex.lifting.util.instr_helper import Instruction\nfrom pyvex.lifting.util.lifter_helper import GymratLifter\n\nlog = logging.getLogger(__name__)\n\n\nclass Aarch64Instruction(Instruction):  # pylint: disable=abstract-method\n    # NOTE: WARNING: There is no MRS, MSR, SYSL in VEX's ARM implementation\n    # You must use straight nasty hacks instead.\n    pass\n\n\nclass Instruction_SYSL(Aarch64Instruction):\n    name = \"SYSL\"\n    bin_format = \"1101010100101qqqnnnnmmmmppprrrrr\"\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        log.debug(\"Ignoring SYSL instruction at %#x.\", self.addr)\n\n\nclass Instruction_MSR(Aarch64Instruction):\n    name = \"MSR\"\n    bin_format = \"11010101000ioqqqnnnnmmmmppprrrrr\"\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        log.debug(\"Ignoring MSR instruction at %#x.\", self.addr)\n\n\nclass Instruction_MRS(Aarch64Instruction):\n    name = \"MRS\"\n    bin_format = \"110101010011opppnnnnmmmmppprrrrr\"\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        log.debug(\"Ignoring MRS instruction at %#x.\", self.addr)\n\n\nclass AARCH64Spotter(GymratLifter):\n    instrs = [Instruction_MRS, Instruction_MSR, Instruction_SYSL]\n"
  },
  {
    "path": "pyvex/lifting/gym/arm_spotter.py",
    "content": "import logging\n\nimport bitstring\n\nfrom pyvex.lifting.util import JumpKind, Type\nfrom pyvex.lifting.util.instr_helper import Instruction, ParseError\nfrom pyvex.lifting.util.lifter_helper import GymratLifter\nfrom pyvex.types import Arch\n\nlog = logging.getLogger(__name__)\n\n\nclass ARMInstruction(Instruction):  # pylint: disable=abstract-method\n    # NOTE: WARNING: There is no CPSR in VEX's ARM implementation\n    # You must use straight nasty hacks instead.\n\n    # NOTE 2: Something is goofy w/r/t archinfo and VEX; cc_op3 is used in ccalls, but there's\n    # no cc_op3 in archinfo, angr itself uses cc_depn instead.  We do the same.\n\n    def match_instruction(self, data, bitstrm):\n        \"\"\"\n        ARM Instructions are pretty dense, so let's do what we can to weed them out\n        \"\"\"\n        if \"c\" not in data or data[\"c\"] == \"1111\":\n            raise ParseError(\"Invalid ARM Instruction\")\n\n    def get_N(self):\n        cc_op = self.get(\"cc_op\", Type.int_32)\n        cc_dep1 = self.get(\"cc_dep1\", Type.int_32)\n        cc_dep2 = self.get(\"cc_dep2\", Type.int_32)\n        cc_depn = self.get(\"cc_ndep\", Type.int_32)\n        return self.ccall(Type.int_32, \"armg_calculate_flag_n\", [cc_op, cc_dep1, cc_dep2, cc_depn])\n\n    def get_C(self):\n        cc_op = self.get(\"cc_op\", Type.int_32)\n        cc_dep1 = self.get(\"cc_dep1\", Type.int_32)\n        cc_dep2 = self.get(\"cc_dep2\", Type.int_32)\n        cc_depn = self.get(\"cc_ndep\", Type.int_32)\n        return self.ccall(Type.int_32, \"armg_calculate_flag_c\", [cc_op, cc_dep1, cc_dep2, cc_depn])\n\n    def get_V(self):\n        cc_op = self.get(\"cc_op\", Type.int_32)\n        cc_dep1 = self.get(\"cc_dep1\", Type.int_32)\n        cc_dep2 = self.get(\"cc_dep2\", Type.int_32)\n        cc_depn = self.get(\"cc_ndep\", Type.int_32)\n        return self.ccall(Type.int_32, \"armg_calculate_flag_v\", [cc_op, cc_dep1, cc_dep2, cc_depn])\n\n    def get_Z(self):\n       
 cc_op = self.get(\"cc_op\", Type.int_32)\n        cc_dep1 = self.get(\"cc_dep1\", Type.int_32)\n        cc_dep2 = self.get(\"cc_dep2\", Type.int_32)\n        cc_depn = self.get(\"cc_ndep\", Type.int_32)\n        return self.ccall(Type.int_32, \"armg_calculate_flag_z\", [cc_op.rdt, cc_dep1.rdt, cc_dep2.rdt, cc_depn.rdt])\n\n    def evaluate_condition(self):\n        # condition codes should be in 'c'\n        cond = self.data[\"c\"]\n        if cond == \"0000\":\n            # equal, z set\n            return self.get_Z() == 1\n        elif cond == \"0001\":\n            # not equal, Z clear\n            return self.get_Z() == 0\n        elif cond == \"0010\":\n            # Carry, C set\n            return self.get_C() == 1\n        elif cond == \"0011\":\n            # Carry Clear, C clear\n            return self.get_C() == 0\n        elif cond == \"0100\":\n            # MI / neagative / N set\n            return self.get_N() == 1\n        elif cond == \"0101\":\n            # PL / plus / positive / N clear\n            return self.get_N() == 0\n        elif cond == \"0110\":\n            # VS / V set / Overflow\n            return self.get_V() == 1\n        elif cond == \"0111\":\n            # VC / V Clear / no overflow\n            return self.get_V() == 0\n        elif cond == \"1000\":\n            # Hi / unsigned higher / C set, Z clear\n            return (self.get_C() == 1) & (self.get_Z() == 0)\n        elif cond == \"1001\":\n            # LS / C clear, Z set\n            return (self.get_C() == 0) & (self.get_Z() == 1)\n        elif cond == \"1011\":\n            # LT / Less than / N != V\n            return self.get_N() != self.get_V()\n        elif cond == \"1100\":\n            # GT / greater than / Z clear and (n == v)\n            return (self.get_Z() == 1) & (self.get_N() != self.get_V())\n        elif cond == \"1101\":\n            # LE / less than or equal to / Z set OR (N != V)\n            return (self.get_Z() == 1) | (self.get_N() != 
self.get_V())\n        else:\n            # No condition\n            return None\n\n    def _load_le_instr(self, bitstream: bitstring.ConstBitStream, numbits: int) -> str:\n        # THUMB mode instructions swap endianness every two bytes!\n        if (self.addr & 1) == 1 and numbits > 16:\n            chunk = \"\"\n            oldpos = bitstream.pos\n            try:\n                for _ in range(0, numbits, 16):\n                    chunk += bitstring.Bits(uint=bitstream.peek(\"uintle:%d\" % 16), length=16).bin\n                    bitstream.pos += 16\n            finally:\n                bitstream.pos = oldpos\n            return chunk\n        return super()._load_le_instr(bitstream, numbits)\n\n\nclass Instruction_MRC(ARMInstruction):\n    name = \"MRC\"\n    bin_format = \"cccc1110CCC1nnnnddddppppOOOOOOOO\"\n    # 11101110000100010001111100010000\n    # c = cond\n    # C = Coprocessor operation mode\n    # d = CPd\n    # O = Offset\n    # p = CP#\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        # TODO at least look at the conditionals\n        # TODO Clobber the dst reg of MCR\n        # TODO maybe treat coproc regs as simple storage (even though they are very much not)\n        log.debug(\"Ignoring MRC instruction at %#x.\", self.addr)\n\n\nclass Instruction_MCR(ARMInstruction):\n    name = \"MCR\"\n    bin_format = \"cccc1110CCC0nnnnddddppppOOOOOOOO\"\n    # 11101110000000010000111100010000\n    # c = cond\n    # C = Coprocessor operation mode\n    # d = CPd\n    # O = Offset\n    # p = CP#\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        # TODO at least look at the conditionals\n        # TODO Clobber the dst reg of MCR\n        # TODO maybe treat coproc regs as simple storage (even though they are very much not)\n        log.debug(\"Ignoring MCR instruction at %#x.\", self.addr)\n\n\nclass Instruction_MSR(ARMInstruction):\n    name = \"MSR\"\n    bin_format = 
\"cccc00i10d10xxxj1111ssssssssssss\"\n    #             11100011001000011111000010010001\n    #             11100001011011111111000000000001\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        log.debug(\n            \"Ignoring MSR instruction at %#x. VEX cannot support this instruction. \"\n            \"See pyvex/lifting/gym/arm_spotter.py\",\n            self.addr,\n        )\n\n\nclass Instruction_MRS(ARMInstruction):\n    name = \"MRS\"\n    bin_format = \"cccc00010s001111dddd000000000000\"\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        log.debug(\n            \"Ignoring MRS instruction at %#x. VEX cannot support this instruction. \"\n            \"See pyvex/lifting/gym/arm_spotter.py\",\n            self.addr,\n        )\n\n\nclass Instruction_STM(ARMInstruction):\n    name = \"STM\"\n    bin_format = \"cccc100pu1w0bbbbrrrrrrrrrrrrrrrr\"\n\n    def match_instruction(self, data, bitstrm):\n        # If we don't push anything, that's not real\n        if int(data[\"r\"]) == 0:\n            raise ParseError(\"Invalid STM instruction\")\n        return True\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        log.debug(\n            \"Ignoring STMxx ^ instruction at %#x. This mode is not implemented by VEX! \"\n            \"See pyvex/lifting/gym/arm_spotter.py\",\n            self.addr,\n        )\n\n\nclass Instruction_LDM(ARMInstruction):\n    name = \"LDM\"\n    bin_format = \"cccc100PU1W1bbbbrrrrrrrrrrrrrrrr\"\n\n    def match_instruction(self, data, bitstrm):\n        # If we don't push anything, that's not real\n        if int(data[\"r\"]) == 0:\n            raise ParseError(\"Invalid LDM instruction\")\n        return True\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        # test if PC will be set. If so, the jumpkind of this block should be Ijk_Ret\n        log.debug(\"Spotting an LDM instruction at %#x.  This is not fully tested.  
Prepare for errors.\", self.addr)\n\n        src_n = f\"r{int(self.data['b'], 2)}\"\n        src = self.get(src_n, Type.int_32)\n\n        for reg_num, bit in enumerate(self.data[\"r\"]):\n            reg_num = 15 - reg_num\n            if bit == \"1\":\n                if self.data[\"P\"] == \"1\":\n                    if self.data[\"U\"] == \"0\":\n                        src += 4\n                    else:\n                        src -= 4\n                val = self.load(src, Type.int_32)\n                self.put(val, f\"r{reg_num}\")\n                if self.data[\"P\"] == \"0\":\n                    if self.data[\"U\"] == \"0\":\n                        src += 4\n                    else:\n                        src -= 4\n                # If we touch PC, we're doing a RET!\n                if reg_num == 15 and bit == \"1\":\n                    cond = self.evaluate_condition()\n                    if cond is not None:\n                        self.jump(cond, val, JumpKind.Ret)\n                    else:\n                        self.jump(None, val, JumpKind.Ret)\n        # Write-back\n        if self.data[\"W\"] == \"1\":\n            self.put(src, src_n)\n\n\nclass Instruction_STC(ARMInstruction):\n    name = \"STC\"\n    bin_format = \"cccc110PUNW0nnnnddddppppOOOOOOOO\"\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        # TODO At least look at the conditionals\n        log.debug(\"Ignoring STC instruction at %#x.\", self.addr)\n\n\nclass Instruction_STC_THUMB(ARMInstruction):\n    name = \"STC\"\n    bin_format = \"111c110PUNW0nnnnddddppppOOOOOOOO\"\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        # TODO At least look at the conditionals\n        log.debug(\"Ignoring STC instruction at %#x.\", self.addr)\n\n\nclass Instruction_LDC(ARMInstruction):\n    name = \"LDC\"\n    bin_format = \"cccc110PUNW1nnnnddddppppOOOOOOOO\"\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        # 
TODO At least look at the conditionals\n        # TODO Clobber the dest reg of LDC\n        # TODO Maybe clobber the dst reg of CDP, if we're really adventurous\n        log.debug(\"Ignoring LDC instruction at %#x.\", self.addr)\n\n\nclass Instruction_LDC_THUMB(ARMInstruction):\n    name = \"LDC\"\n    bin_format = \"111c110PUNW1nnnnddddppppOOOOOOOO\"\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        # TODO At least look at the conditionals\n        # TODO Clobber the dest reg of LDC\n        # TODO Maybe clobber the dst reg of CDP, if we're really adventurous\n        log.debug(\"Ignoring LDC instruction at %#x.\", self.addr)\n\n\nclass Instruction_CDP(Instruction):\n    name = \"CDP\"\n    bin_format = \"cccc1110oooonnnnddddppppPPP0mmmm\"\n    # c = cond\n    # d = CPd\n    # O = Offset\n    # p = CP#\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        # TODO At least look at the conditionals\n        # TODO Maybe clobber the dst reg of CDP, if we're really adventurous\n        log.debug(\"Ignoring CDP instruction at %#x.\", self.addr)\n\n\n##\n## Thumb! 
(ugh)\n##\n\n\nclass ThumbInstruction(Instruction):  # pylint: disable=abstract-method\n    def mark_instruction_start(self):\n        self.irsb_c.imark(self.addr - 1, self.bytewidth, 1)\n\n\nclass Instruction_tCPSID(ThumbInstruction):\n    name = \"CPSID\"\n    bin_format = \"101101x0011x0010\"\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        # TODO haha lol yeah right\n        log.debug(\"[thumb] Ignoring CPS instruction at %#x.\", self.addr)\n\n\nclass Instruction_tMSR(ThumbInstruction):\n    name = \"tMSR\"\n    bin_format = \"10x0mmmmxxxxxxxx11110011100Rrrrr\"\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        dest_spec_reg = int(self.data[\"x\"], 2)\n        src_reg = f\"r{int(self.data['r'], 2)}\"\n\n        # If 0, do not write the SPSR\n        if self.data[\"R\"] == \"0\":\n            if dest_spec_reg == 8:  # msp\n                src = self.get(src_reg, Type.int_32)\n                self.put(src, \"sp\")\n            elif dest_spec_reg == 16:  # primask\n                src = self.get(src_reg, Type.int_32)\n                self.put(src, \"primask\")\n            else:\n                log.debug(\n                    \"[thumb] FIXME: tMSR at %#x is writing into an unsupported special register %#x. \"\n                    \"Ignoring the instruction.\",\n                    self.addr,\n                    dest_spec_reg,\n                )\n        else:\n            log.debug(\"[thumb] tMSR at %#x is writing SPSR. Ignoring the instruction. FixMe.\", self.addr)\n        log.debug(\n            \"[thumb] Spotting an tMSR instruction at %#x.  This is not fully tested.  
Prepare for errors.\", self.addr\n        )\n\n\nclass Instruction_tMRS(ThumbInstruction):\n    name = \"tMRS\"\n    bin_format = \"10x0mmmmxxxxxxxx11110011111Rrrrr\"\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        spec_reg = int(self.data[\"x\"], 2)\n        dest_reg = f\"r{int(self.data['m'], 2)}\"\n\n        # Reading from CPSR\n        if self.data[\"R\"] == \"0\":\n            # See special registers constants here:\n            # https://github.com/aquynh/capstone/blob/45bec1a691e455b864f7e4d394711a467e5493dc/arch/ARM/ARMInstPrinter.c#L1654\n            if spec_reg == 8:\n                # We move the SP and call it a day.\n                src = self.get(\"sp\", Type.int_32)\n                self.put(src, dest_reg)\n            elif spec_reg == 16:\n                src = self.get(\"primask\", Type.int_32)\n                self.put(src, dest_reg)\n            else:\n                log.debug(\n                    \"[thumb] FIXME: tMRS at %#x is using the unsupported special register %#x. \"\n                    \"Ignoring the instruction.\",\n                    self.addr,\n                    spec_reg,\n                )\n        else:\n            log.debug(\"[thumb] tMRS at %#x is reading from SPSR. Ignoring the instruction. FixMe.\", self.addr)\n            log.debug(\"[thumb] Ignoring tMRS instruction at %#x.\", self.addr)\n        log.debug(\n            \"[thumb] Spotting an tMRS instruction at %#x.  This is not fully tested.  
Prepare for errors.\", self.addr\n        )\n\n\nclass Instruction_tDMB(ThumbInstruction):\n    name = \"DMB\"\n    bin_format = \"100011110101xxxx1111001110111111\"\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        # TODO haha lol yeah right\n        log.debug(\"[thumb] Ignoring DMB instruction at %#x.\", self.addr)\n\n\nclass Instruction_WFI(ThumbInstruction):\n    name = \"WFI\"\n    bin_format = \"10111111001a0000\"\n    # 1011111100110000\n\n    def compute_result(self):  # pylint: disable=arguments-differ\n        log.debug(\"[thumb] Ignoring WFI instruction at %#x.\", self.addr)\n\n\nclass ARMSpotter(GymratLifter):\n    arm_instrs = [\n        Instruction_MRC,\n        Instruction_MCR,\n        Instruction_MSR,\n        Instruction_MRS,\n        Instruction_STM,\n        Instruction_LDM,\n        Instruction_STC,\n        Instruction_LDC,\n        Instruction_CDP,\n    ]\n    thumb_instrs = [\n        Instruction_tCPSID,\n        Instruction_tMSR,\n        Instruction_tMRS,\n        Instruction_WFI,\n        Instruction_tDMB,\n        Instruction_STC_THUMB,\n        Instruction_LDC_THUMB,\n    ]\n\n    def __init__(self, arch: Arch, addr: int):\n        super().__init__(arch, addr)\n        self.thumb: bool = False\n\n    def _lift(self):\n        if self.irsb.addr & 1:\n            # Thumb!\n            self.instrs = self.thumb_instrs\n            self.thumb = True\n        else:\n            self.instrs = self.arm_instrs\n            self.thumb = False\n        super()._lift()\n"
  },
  {
    "path": "pyvex/lifting/gym/x86_spotter.py",
    "content": "import logging\n\nfrom pyvex.lifting.util import GymratLifter, Instruction, JumpKind, Type\n\nlog = logging.getLogger(__name__)\n\n# pylint: disable=missing-class-docstring\n\n\nclass Instruction_SWAPGS(Instruction):\n    name = \"SWAPGS\"\n    bin_format = \"000011110000000111111000\"  # 0f 01 f8\n\n    def compute_result(self, *args):\n        pass  # TODO check for priv mode\n\n\nclass Instruction_SYSRET(Instruction):\n    name = \"SYSRET\"\n    bin_format = \"010010000000111100000111\"  # 48 04 07\n\n    def compute_result(self, *args):\n        result = self.dirty(Type.int_64, \"%sg_dirtyhelper_SYSRET\" % self.arch.name.lower(), ())\n        self.jump(None, result, JumpKind.Ret)\n\n\nclass Instruction_IRETQ(Instruction):\n    name = \"IRETQ\"\n    bin_format = \"0100100011001111\"  # 48 cf\n\n    def compute_result(self, *args):\n        result = self.dirty(Type.int_64, \"%sg_dirtyhelper_IRETQ\" % self.arch.name.lower(), ())\n        self.jump(None, result, JumpKind.Ret)\n\n\nclass Instruction_RDMSR(Instruction):\n    name = \"RDMSR\"\n    bin_format = \"0000111100110010\"  # 0f 32\n\n    def compute_result(self, *args):\n        ecx = self.get(\"ecx\", Type.int_32)\n        result = self.dirty(Type.int_64, \"%sg_dirtyhelper_RDMSR\" % self.arch.name.lower(), (ecx,))\n        edx = result.narrow_high(Type.int_32)\n        eax = result.narrow_low(Type.int_32)\n        if self.arch.bits == 32:\n            self.put(eax, \"eax\")\n            self.put(edx, \"edx\")\n        else:\n            self.put(eax.widen_unsigned(Type.int_64), \"rax\")\n            self.put(edx.widen_unsigned(Type.int_64), \"rdx\")\n\n\nclass Instruction_XGETBV(Instruction):\n    name = \"XGETBV\"\n    bin_format = \"000011110000000111010000\"  # 0f 01 d0\n\n    def compute_result(self, *args):\n        ecx = self.get(\"ecx\", Type.int_32)\n        result = self.dirty(Type.int_64, \"%sg_dirtyhelper_XGETBV\" % self.arch.name.lower(), (ecx,))\n        edx = 
result.narrow_high(Type.int_32)\n        eax = result.narrow_low(Type.int_32)\n        if self.arch.bits == 32:\n            self.put(eax, \"eax\")\n            self.put(edx, \"edx\")\n        else:\n            self.put(eax.widen_unsigned(Type.int_64), \"rax\")\n            self.put(edx.widen_unsigned(Type.int_64), \"rdx\")\n\n\nclass Instruction_AAM(Instruction):\n    name = \"AAM\"\n    bin_format = \"11010100iiiiiiii\"\n\n    # From https://www.felixcloutier.com/x86/aam\n    def compute_result(self):  # pylint: disable=arguments-differ\n        base = self.constant(int(self.data[\"i\"], 2), Type.int_8)\n        temp_al = self.get(\"al\", Type.int_8)\n        temp_ah = temp_al // base\n        temp_al = temp_al % base\n        self.put(temp_ah, \"ah\")\n        self.put(temp_al, \"al\")\n        log.debug(\n            \"The generalized AAM instruction is not supported by VEX, and is handled specially by pyvex.\"\n            \" It has no flag handling at present.  See pyvex/lifting/gym/x86_spotter.py for details\"\n        )\n\n    # TODO: Flags\n\n\nclass Instruction_AAD(Instruction):\n    name = \"AAD\"\n    bin_format = \"11010101iiiiiiii\"\n\n    # From https://www.felixcloutier.com/x86/aad\n    def compute_result(self):  # pylint: disable=arguments-differ\n        base = self.constant(int(self.data[\"i\"], 2), Type.int_8)\n        temp_al = self.get(\"al\", Type.int_8)\n        temp_ah = self.get(\"ah\", Type.int_8)\n        temp_al = (temp_al + (temp_ah * base)) & 0xFF\n        temp_ah = self.constant(0, Type.int_8)\n        self.put(temp_ah, \"ah\")\n        self.put(temp_al, \"al\")\n        log.debug(\n            \"The generalized AAD instruction is not supported by VEX, and is handled specially by pyvex.\"\n            \" It has no flag handling at present.  
See pyvex/lifting/gym/x86_spotter.py for details\"\n        )\n\n    # TODO: Flags\n\n\nclass AMD64Spotter(GymratLifter):\n    instrs = [\n        Instruction_RDMSR,\n        Instruction_XGETBV,\n        Instruction_AAD,\n        Instruction_AAM,\n        Instruction_SWAPGS,\n        Instruction_IRETQ,\n        Instruction_SYSRET,\n    ]\n\n\nclass X86Spotter(GymratLifter):\n    instrs = [\n        Instruction_RDMSR,\n        Instruction_XGETBV,\n        Instruction_AAD,\n        Instruction_AAM,\n    ]\n"
  },
  {
    "path": "pyvex/lifting/libvex.py",
    "content": "import logging\nimport threading\nfrom typing import TYPE_CHECKING\n\nfrom pyvex.errors import LiftingException\nfrom pyvex.native import ffi, pvc\nfrom pyvex.types import CLiftSource, LibvexArch\n\nfrom .lift_function import Lifter\n\nlog = logging.getLogger(\"pyvex.lifting.libvex\")\n\n_libvex_lock = threading.Lock()\n\nLIBVEX_SUPPORTED_ARCHES = {\n    \"X86\",\n    \"AMD64\",\n    \"MIPS32\",\n    \"MIPS64\",\n    \"ARM\",\n    \"ARMEL\",\n    \"ARMHF\",\n    \"ARMCortexM\",\n    \"AARCH64\",\n    \"PPC32\",\n    \"PPC64\",\n    \"S390X\",\n    \"RISCV64\",\n}\n\nVEX_MAX_INSTRUCTIONS = 99\nVEX_MAX_BYTES = 5000\n\n\nclass VexRegisterUpdates:\n    VexRegUpd_INVALID = 0x700\n    VexRegUpdSpAtMemAccess = 0x701\n    VexRegUpdUnwindregsAtMemAccess = 0x702\n    VexRegUpdAllregsAtMemAccess = 0x703\n    VexRegUpdAllregsAtEachInsn = 0x704\n    VexRegUpdLdAllregsAtEachInsn = 0x705\n\n\nclass LibVEXLifter(Lifter):\n    __slots__ = ()\n\n    REQUIRE_DATA_C = True\n\n    @staticmethod\n    def get_vex_log():\n        return bytes(ffi.buffer(pvc.msg_buffer, pvc.msg_current_size)).decode() if pvc.msg_buffer != ffi.NULL else None\n\n    def _lift(self):\n        if TYPE_CHECKING:\n            assert isinstance(self.irsb.arch, LibvexArch)\n            assert isinstance(self.data, CLiftSource)\n        try:\n            _libvex_lock.acquire()\n\n            pvc.log_level = log.getEffectiveLevel()\n            vex_arch = getattr(pvc, self.irsb.arch.vex_arch, None)\n            assert vex_arch is not None\n\n            if self.bytes_offset is None:\n                self.bytes_offset = 0\n\n            if self.max_bytes is None or self.max_bytes > VEX_MAX_BYTES:\n                max_bytes = VEX_MAX_BYTES\n            else:\n                max_bytes = self.max_bytes\n\n            if self.max_inst is None or self.max_inst > VEX_MAX_INSTRUCTIONS:\n                max_inst = VEX_MAX_INSTRUCTIONS\n            else:\n                max_inst = self.max_inst\n\n           
 strict_block_end = self.strict_block_end\n            if strict_block_end is None:\n                strict_block_end = True\n\n            if self.cross_insn_opt:\n                px_control = VexRegisterUpdates.VexRegUpdUnwindregsAtMemAccess\n            else:\n                px_control = VexRegisterUpdates.VexRegUpdLdAllregsAtEachInsn\n\n            self.irsb.arch.vex_archinfo[\"hwcache_info\"][\"caches\"] = ffi.NULL\n            lift_r = pvc.vex_lift(\n                vex_arch,\n                self.irsb.arch.vex_archinfo,\n                self.data + self.bytes_offset,\n                self.irsb.addr,\n                max_inst,\n                max_bytes,\n                self.opt_level,\n                self.traceflags,\n                self.allow_arch_optimizations,\n                strict_block_end,\n                1 if self.collect_data_refs else 0,\n                1 if self.load_from_ro_regions else 0,\n                1 if self.const_prop else 0,\n                px_control,\n                self.bytes_offset,\n            )\n            log_str = self.get_vex_log()\n            if lift_r == ffi.NULL:\n                raise LiftingException(\"libvex: unknown error\" if log_str is None else log_str)\n            else:\n                if log_str is not None:\n                    log.debug(log_str)\n\n            self.irsb._from_c(lift_r, skip_stmts=self.skip_stmts)\n            if self.irsb.size == 0:\n                log.debug(\"raising lifting exception\")\n                raise LiftingException(\"libvex: could not decode any instructions @ 0x%x\" % self.addr)\n        finally:\n            _libvex_lock.release()\n            self.irsb.arch.vex_archinfo[\"hwcache_info\"][\"caches\"] = None\n"
  },
  {
    "path": "pyvex/lifting/lift_function.py",
    "content": "import logging\nfrom collections import defaultdict\nfrom typing import DefaultDict\n\nfrom pyvex import const\nfrom pyvex.block import IRSB\nfrom pyvex.const import vex_int_class\nfrom pyvex.errors import LiftingException, NeedStatementsNotification, PyVEXError, SkipStatementsError\nfrom pyvex.expr import Const\nfrom pyvex.native import ffi\nfrom pyvex.types import LiftSource, PyLiftSource\n\nfrom .lifter import Lifter\nfrom .post_processor import Postprocessor\n\nlog = logging.getLogger(__name__)\n\nlifters: DefaultDict[str, list[type[Lifter]]] = defaultdict(list)\npostprocessors: DefaultDict[str, list[type[Postprocessor]]] = defaultdict(list)\n\n\ndef lift(\n    data: LiftSource,\n    addr,\n    arch,\n    max_bytes=None,\n    max_inst=None,\n    bytes_offset=0,\n    opt_level=1,\n    traceflags=0,\n    strict_block_end=True,\n    inner=False,\n    skip_stmts=False,\n    collect_data_refs=False,\n    cross_insn_opt=True,\n    load_from_ro_regions=False,\n    const_prop=False,\n):\n    \"\"\"\n    Recursively lifts blocks using the registered lifters and postprocessors. Tries each lifter in the order in\n    which they are registered on the data to lift.\n\n    If a lifter raises a LiftingException on the data, it is skipped.\n    If it succeeds and returns a block with a jumpkind of Ijk_NoDecode, all of the lifters are tried on the rest\n    of the data and if they work, their output is appended to the first block.\n\n    :param arch:            The arch to lift the data as.\n    :param addr:            The starting address of the block. Effects the IMarks.\n    :param data:            The bytes to lift as either a python string of bytes or a cffi buffer object.\n    :param max_bytes:       The maximum number of bytes to lift. If set to None, no byte limit is used.\n    :param max_inst:        The maximum number of instructions to lift. 
If set to None, no instruction limit is used.\n    :param bytes_offset:    The offset into `data` to start lifting at.\n    :param opt_level:       The level of optimization to apply to the IR, -1 through 2. -1 is the strictest\n                            unoptimized level, 0 is unoptimized but will perform some lookahead/lookbehind\n                            optimizations, 1 performs constant propagation, and 2 performs loop unrolling,\n                            which honestly doesn't make much sense in the context of pyvex. The default is 1.\n    :param traceflags:      The libVEX traceflags, controlling VEX debug prints.\n\n    .. note:: Explicitly specifying the number of instructions to lift (`max_inst`) may not always work\n              exactly as expected. For example, on MIPS, it is meaningless to lift a branch or jump\n              instruction without its delay slot. VEX attempts to Do The Right Thing by possibly decoding\n              fewer instructions than requested. Specifically, this means that lifting a branch or jump\n              on MIPS as a single instruction (`max_inst=1`) will result in an empty IRSB, and subsequent\n              attempts to run this block will raise `SimIRSBError('Empty IRSB passed to SimIRSB.')`.\n\n    .. 
note:: If no instruction and byte limit is used, pyvex will continue lifting the block until the block\n              ends properly or until it runs out of data to lift.\n    \"\"\"\n    if max_bytes is not None and max_bytes <= 0:\n        raise PyVEXError(\"Cannot lift block with no data (max_bytes <= 0)\")\n\n    if not data:\n        raise PyVEXError(\"Cannot lift block with no data (data is empty)\")\n\n    if isinstance(data, str):\n        raise TypeError(\"Cannot pass unicode string as data to lifter\")\n\n    py_data: PyLiftSource | None\n    if isinstance(data, (bytes, bytearray, memoryview)):\n        py_data = data\n        c_data = None\n    else:\n        if max_bytes is None:\n            raise PyVEXError(\"Cannot lift block with ffi pointer and no size (max_bytes is None)\")\n        c_data = data\n        py_data = None\n\n    allow_arch_optimizations = True\n    # In order to attempt to preserve the property that\n    # VEX lifts the same bytes to the same IR at all times when optimizations are disabled\n    # we hack off all of VEX's non-IROpt optimizations when opt_level == -1.\n    # This is intended to enable comparisons of the lifted IR between code that happens to be\n    # found in different contexts.\n    if opt_level < 0:\n        allow_arch_optimizations = False\n        opt_level = 0\n\n    for lifter in lifters[arch.name]:\n        try:\n            u_data: LiftSource = data\n            if lifter.REQUIRE_DATA_C:\n                if c_data is None:\n                    assert py_data is not None\n                    if isinstance(py_data, (bytearray, memoryview)):\n                        u_data = ffi.from_buffer(ffi.BVoidP, py_data)\n                    else:\n                        u_data = ffi.from_buffer(ffi.BVoidP, py_data + b\"\\0\" * 8)\n                    max_bytes = min(len(py_data), max_bytes) if max_bytes is not None else len(py_data)\n                else:\n                    u_data = c_data\n                skip = 0\n   
         elif lifter.REQUIRE_DATA_PY:\n                if bytes_offset and arch.name.startswith(\"ARM\") and (addr & 1) == 1:\n                    skip = bytes_offset - 1\n                else:\n                    skip = bytes_offset\n                if py_data is None:\n                    assert c_data is not None\n                    if max_bytes is None:\n                        log.debug(\"Cannot create py_data from c_data when no max length is given\")\n                        continue\n                    u_data = ffi.buffer(c_data + skip, max_bytes)[:]\n                else:\n                    if max_bytes is None:\n                        u_data = py_data[skip:]\n                    else:\n                        u_data = py_data[skip : skip + max_bytes]\n            else:\n                raise RuntimeError(\n                    \"Incorrect lifter configuration. What type of data does %s expect?\" % lifter.__class__\n                )\n\n            try:\n                final_irsb = lifter(arch, addr).lift(\n                    u_data,\n                    bytes_offset - skip,\n                    max_bytes,\n                    max_inst,\n                    opt_level,\n                    traceflags,\n                    allow_arch_optimizations,\n                    strict_block_end,\n                    skip_stmts,\n                    collect_data_refs=collect_data_refs,\n                    cross_insn_opt=cross_insn_opt,\n                    load_from_ro_regions=load_from_ro_regions,\n                    const_prop=const_prop,\n                )\n            except SkipStatementsError:\n                assert skip_stmts is True\n                final_irsb = lifter(arch, addr).lift(\n                    u_data,\n                    bytes_offset - skip,\n                    max_bytes,\n                    max_inst,\n                    opt_level,\n                    traceflags,\n                    allow_arch_optimizations,\n                    
strict_block_end,\n                    skip_stmts=False,\n                    collect_data_refs=collect_data_refs,\n                    cross_insn_opt=cross_insn_opt,\n                    load_from_ro_regions=load_from_ro_regions,\n                    const_prop=const_prop,\n                )\n            break\n        except LiftingException as ex:\n            log.debug(\"Lifting Exception: %s\", str(ex))\n            continue\n    else:\n        final_irsb = IRSB.empty_block(\n            arch,\n            addr,\n            size=0,\n            nxt=Const(const.vex_int_class(arch.bits)(addr)),\n            jumpkind=\"Ijk_NoDecode\",\n        )\n        final_irsb.invalidate_direct_next()\n        return final_irsb\n\n    if final_irsb.size > 0 and final_irsb.jumpkind == \"Ijk_NoDecode\":\n        # We have decoded a few bytes before we hit an undecodeable instruction.\n\n        # Determine if this is an intentional NoDecode, like the ud2 instruction on AMD64\n        nodecode_addr_expr = final_irsb.next\n        if type(nodecode_addr_expr) is Const:\n            nodecode_addr = nodecode_addr_expr.con.value\n            next_irsb_start_addr = addr + final_irsb.size\n            if nodecode_addr != next_irsb_start_addr:\n                # The last instruction of the IRSB has a non-zero length. This is an intentional NoDecode.\n                # The very last instruction has been decoded\n                final_irsb.jumpkind = \"Ijk_NoDecode\"\n                final_irsb.next = final_irsb.next\n                final_irsb.invalidate_direct_next()\n                return final_irsb\n\n        # Decode more bytes\n        if skip_stmts:\n            # When gymrat will be invoked, we will merge future basic blocks to the current basic block. 
In this case,\n            # statements are usually required.\n            # TODO: In the future, we may further optimize it to handle cases where getting statements in gymrat is not\n            # TODO: required.\n            return lift(\n                data,\n                addr,\n                arch,\n                max_bytes=max_bytes,\n                max_inst=max_inst,\n                bytes_offset=bytes_offset,\n                opt_level=opt_level,\n                traceflags=traceflags,\n                strict_block_end=strict_block_end,\n                skip_stmts=False,\n                collect_data_refs=collect_data_refs,\n                load_from_ro_regions=load_from_ro_regions,\n                const_prop=const_prop,\n            )\n\n        next_addr = addr + final_irsb.size\n        if max_bytes is not None:\n            max_bytes -= final_irsb.size\n        if isinstance(data, (bytes, bytearray, memoryview)):\n            data_left = data[final_irsb.size :]\n        else:\n            data_left = data + final_irsb.size\n        if max_inst is not None:\n            max_inst -= final_irsb.instructions\n        if (max_bytes is None or max_bytes > 0) and (max_inst is None or max_inst > 0) and data_left:\n            more_irsb = lift(\n                data_left,\n                next_addr,\n                arch,\n                max_bytes=max_bytes,\n                max_inst=max_inst,\n                bytes_offset=bytes_offset,\n                opt_level=opt_level,\n                traceflags=traceflags,\n                strict_block_end=strict_block_end,\n                inner=True,\n                skip_stmts=False,\n                collect_data_refs=collect_data_refs,\n                load_from_ro_regions=load_from_ro_regions,\n                const_prop=const_prop,\n            )\n            if more_irsb.size:\n                # Successfully decoded more bytes\n                final_irsb.extend(more_irsb)\n        elif max_bytes == 0:\n     
       # We have no more bytes left. Mark the jumpkind of the IRSB as Ijk_Boring\n            if final_irsb.size > 0 and final_irsb.jumpkind == \"Ijk_NoDecode\":\n                final_irsb.jumpkind = \"Ijk_Boring\"\n                final_irsb.next = Const(vex_int_class(arch.bits)(final_irsb.addr + final_irsb.size))\n\n    if not inner:\n        for postprocessor in postprocessors[arch.name]:\n            try:\n                postprocessor(final_irsb).postprocess()\n            except NeedStatementsNotification as e:\n                # The post-processor cannot work without statements. Re-lift the current block with skip_stmts=False\n                if not skip_stmts:\n                    # sanity check\n                    # Why does the post-processor raise NeedStatementsNotification when skip_stmts is False?\n                    raise TypeError(\n                        \"Bad post-processor %s: \"\n                        \"NeedStatementsNotification is raised when statements are available.\" % postprocessor.__class__\n                    ) from e\n\n                # Re-lift the current IRSB\n                return lift(\n                    data,\n                    addr,\n                    arch,\n                    max_bytes=max_bytes,\n                    max_inst=max_inst,\n                    bytes_offset=bytes_offset,\n                    opt_level=opt_level,\n                    traceflags=traceflags,\n                    strict_block_end=strict_block_end,\n                    inner=inner,\n                    skip_stmts=False,\n                    collect_data_refs=collect_data_refs,\n                    load_from_ro_regions=load_from_ro_regions,\n                    const_prop=const_prop,\n                )\n            except LiftingException:\n                continue\n\n    return final_irsb\n\n\ndef register(lifter, arch_name):\n    \"\"\"\n    Registers a Lifter or Postprocessor to be used by pyvex. 
Lifters are given priority based on the order\n    in which they are registered. Postprocessors will be run in registration order.\n\n    :param lifter:       The Lifter or Postprocessor to register\n    :vartype lifter:     :class:`Lifter` or :class:`Postprocessor`\n    \"\"\"\n    if issubclass(lifter, Lifter):\n        log.debug(\"Registering lifter %s for architecture %s.\", lifter.__name__, arch_name)\n        lifters[arch_name].append(lifter)\n    if issubclass(lifter, Postprocessor):\n        log.debug(\"Registering postprocessor %s for architecture %s.\", lifter.__name__, arch_name)\n        postprocessors[arch_name].append(lifter)\n"
  },
  {
    "path": "pyvex/lifting/lifter.py",
    "content": "from pyvex.block import IRSB\nfrom pyvex.types import Arch, LiftSource\n\n# pylint:disable=attribute-defined-outside-init\n\n\nclass Lifter:\n    __slots__ = (\n        \"data\",\n        \"bytes_offset\",\n        \"opt_level\",\n        \"traceflags\",\n        \"allow_arch_optimizations\",\n        \"strict_block_end\",\n        \"collect_data_refs\",\n        \"max_inst\",\n        \"max_bytes\",\n        \"skip_stmts\",\n        \"irsb\",\n        \"arch\",\n        \"addr\",\n        \"cross_insn_opt\",\n        \"load_from_ro_regions\",\n        \"const_prop\",\n        \"disasm\",\n        \"dump_irsb\",\n    )\n\n    \"\"\"\n    A lifter is a class of methods for processing a block.\n\n    :ivar data:             The bytes to lift as either a python string of bytes or a cffi buffer object.\n    :ivar bytes_offset:     The offset into `data` to start lifting at.\n    :ivar max_bytes:        The maximum number of bytes to lift. If set to None, no byte limit is used.\n    :ivar max_inst:         The maximum number of instructions to lift. If set to None, no instruction limit is used.\n    :ivar opt_level:        The level of optimization to apply to the IR, 0-2. Most likely will be ignored in any lifter\n                            other then LibVEX.\n    :ivar traceflags:       The libVEX traceflags, controlling VEX debug prints. 
Most likely will be ignored in any\n                            lifter other than LibVEX.\n    :ivar allow_arch_optimizations:   Should the LibVEX lifter be allowed to perform lift-time preprocessing\n                            optimizations (e.g., lookback ITSTATE optimization on THUMB)\n                            Most likely will be ignored in any lifter other than LibVEX.\n    :ivar strict_block_end: Should the LibVEX arm-thumb split block at some instructions, for example CB{N}Z.\n    :ivar skip_stmts:       Should LibVEX ignore statements.\n    \"\"\"\n    REQUIRE_DATA_C = False\n    REQUIRE_DATA_PY = False\n\n    def __init__(self, arch: Arch, addr: int):\n        self.arch: Arch = arch\n        self.addr: int = addr\n\n    def lift(\n        self,\n        data: LiftSource,\n        bytes_offset: int | None = None,\n        max_bytes: int | None = None,\n        max_inst: int | None = None,\n        opt_level: int | float = 1,\n        traceflags: int | None = None,\n        allow_arch_optimizations: bool | None = None,\n        strict_block_end: bool | None = None,\n        skip_stmts: bool = False,\n        collect_data_refs: bool = False,\n        cross_insn_opt: bool = True,\n        load_from_ro_regions: bool = False,\n        const_prop: bool = False,\n        disasm: bool = False,\n        dump_irsb: bool = False,\n    ):\n        \"\"\"\n        Wrapper around the `_lift` method on Lifters. Should not be overridden in child classes.\n\n        :param data:                The bytes to lift as either a python string of bytes or a cffi buffer object.\n        :param bytes_offset:        The offset into `data` to start lifting at.\n        :param max_bytes:           The maximum number of bytes to lift. If set to None, no byte limit is used.\n        :param max_inst:            The maximum number of instructions to lift. 
If set to None, no instruction limit is\n                                    used.\n        :param opt_level:           The level of optimization to apply to the IR, 0-2. Most likely will be ignored in\n                                    any lifter other then LibVEX.\n        :param traceflags:          The libVEX traceflags, controlling VEX debug prints. Most likely will be ignored in\n                                    any lifter other than LibVEX.\n        :param allow_arch_optimizations:   Should the LibVEX lifter be allowed to perform lift-time preprocessing\n                                    optimizations (e.g., lookback ITSTATE optimization on THUMB) Most likely will be\n                                    ignored in any lifter other than LibVEX.\n        :param strict_block_end:    Should the LibVEX arm-thumb split block at some instructions, for example CB{N}Z.\n        :param skip_stmts:          Should the lifter skip transferring IRStmts from C to Python.\n        :param collect_data_refs:   Should the LibVEX lifter collect data references in C.\n        :param cross_insn_opt:      If cross-instruction-boundary optimizations are allowed or not.\n        :param disasm:              Should the GymratLifter generate disassembly during lifting.\n        :param dump_irsb:           Should the GymratLifter log the lifted IRSB.\n        \"\"\"\n        irsb: IRSB = IRSB.empty_block(self.arch, self.addr)\n        self.data = data\n        self.bytes_offset = bytes_offset\n        self.opt_level = opt_level\n        self.traceflags = traceflags\n        self.allow_arch_optimizations = allow_arch_optimizations\n        self.strict_block_end = strict_block_end\n        self.collect_data_refs = collect_data_refs\n        self.max_inst = max_inst\n        self.max_bytes = max_bytes\n        self.skip_stmts = skip_stmts\n        self.irsb = irsb\n        self.cross_insn_opt = cross_insn_opt\n        self.load_from_ro_regions = load_from_ro_regions\n        
self.const_prop = const_prop\n        self.disasm = disasm\n        self.dump_irsb = dump_irsb\n        self._lift()\n        return self.irsb\n\n    def _lift(self):\n        \"\"\"\n        Lifts the data using the information passed into _lift. Should be overridden in child classes.\n\n        Should set the lifted IRSB to self.irsb.\n        If a lifter raises a LiftingException on the data, this signals that the lifter cannot lift this data and arch\n        and the lifter is skipped.\n        If a lifter can lift any amount of data, it should lift it and return the lifted block with a jumpkind of\n        Ijk_NoDecode, signalling to pyvex that other lifters should be used on the undecodable data.\n\n        \"\"\"\n        raise NotImplementedError()\n"
  },
  {
    "path": "pyvex/lifting/post_processor.py",
    "content": "#\n# The post-processor base class\n#\n\n\nclass Postprocessor:\n    def __init__(self, irsb):\n        self.irsb = irsb\n\n    def postprocess(self):\n        \"\"\"\n        Modify the irsb\n\n        All of the postprocessors will be used in the order that they are registered\n        \"\"\"\n        pass\n"
  },
  {
    "path": "pyvex/lifting/util/__init__.py",
    "content": "from .instr_helper import Instruction\nfrom .lifter_helper import GymratLifter, ParseError\nfrom .syntax_wrapper import VexValue\nfrom .vex_helper import JumpKind, Type\n\n__all__ = [\n    \"Type\",\n    \"JumpKind\",\n    \"VexValue\",\n    \"ParseError\",\n    \"Instruction\",\n    \"GymratLifter\",\n    \"ParseError\",\n]\n"
  },
  {
    "path": "pyvex/lifting/util/instr_helper.py",
    "content": "import abc\nimport string\n\nimport bitstring\n\nfrom pyvex.expr import IRExpr, RdTmp\n\nfrom .lifter_helper import ParseError\nfrom .syntax_wrapper import VexValue\nfrom .vex_helper import IRSBCustomizer, JumpKind, vex_int_class\n\n\nclass Instruction(metaclass=abc.ABCMeta):\n    \"\"\"\n    Base class for an Instruction.\n\n    You should make a subclass of this for each instruction you want to lift. These classes will contain the \"semantics\"\n    of the instruction, that is, what it _does_, in terms of the VEX IR.\n\n    You may want to subclass this for your architecture, and add arch-specific handling for parsing, argument\n    resolution, etc., and have instructions subclass that instead.\n\n    The core parsing functionality is done via ``bin_format``. Each instruction should be a subclass of ``Instruction``\n    and will be parsed by comparing bits in the provided bitstream to symbols in the ``bin_format`` member of the class.\n    \"Bin formats\" are strings of symbols, like those you'd find in an ISA document, such as \"0010rrrrddddffmm\"\n    0 or 1 specify hard-coded bits that must match for an instruction to match.\n    Any letters specify arguments, grouped by letter, which will be parsed and provided as bitstrings in the ``data``\n    member of the class as a dictionary.\n    So, in our example, the bits ``0010110101101001``, applied to format string ``0010rrrrddddffmm``\n    will result in the following in ``self.data``:\n\n        {'r': '1101',\n         'd': '0110',\n         'f': '10',\n         'm': '01'}\n\n    Implement compute_result to provide the \"meat\" of what your instruction does.\n    You can also implement it in your arch-specific subclass of ``Instruction``, to handle things common to all\n    instructions, and provide instruction implementations elsewhere.\n\n    We provide the ``VexValue`` syntax wrapper to make expressing instruction semantics easy.\n    You first convert the bitstring arguments into 
``VexValue``s using the provided convenience methods\n    (``self.get/put/load/store/etc.``)\n    This loads the register from the actual registers into a temporary value we can work with.\n    You can then write it back to a register when you're done.\n    For example, if you have the register in ``r``, as above, you can make a ``VexValue`` like this:\n\n        r = int(self.data['r'], 2) # we get bits corresponding to `r` bits and convert it to an int\n        r_vv = self.get(r, Type.int_32)\n\n    If you then had an instruction to increment ``r``, you could simply:\n\n        return r_vv += 1\n\n    You could then write it back to the register like this:\n\n        self.put(r_vv, r)\n\n    Note that most architectures have special flags that get set differently for each instruction, make sure to\n    implement those as well (override ``set_flags()`` )\n\n    Override ``parse()`` to extend parsing.\n    For example, in MSP430, this allows us to grab extra words from the bitstream\n    when extra immediate words are present.\n\n    All architectures are different enough that there's no magic recipe for how to write a lifter.\n    See the examples provided by gymrat for ideas of how to use this to build your own lifters quickly and easily.\n    \"\"\"\n\n    data: dict[str, str]\n    irsb_c: IRSBCustomizer\n\n    def __init__(self, bitstrm, arch, addr):\n        \"\"\"\n        Create an instance of the instruction\n\n        :param irsb_c: The IRSBCustomizer to put VEX instructions into\n        :param bitstrm: The bitstream to decode instructions from\n        :param addr: The address of the instruction to be lifted, used only for jumps and branches\n        \"\"\"\n        self.addr = addr\n        self.arch = arch\n        self.bitwidth = len(self.bin_format)\n        self.data = self.parse(bitstrm)\n\n    @property\n    @abc.abstractmethod\n    def bin_format(self) -> str:\n        \"\"\"\n        Read the documentation of the class to understand what a bin 
format string is\n\n        :return: str bin format string\n        \"\"\"\n\n    @property\n    @abc.abstractmethod\n    def name(self) -> str:\n        \"\"\"\n        Name of the instruction\n\n        Can be useful to name the instruction when there's an error related to it\n        \"\"\"\n\n    def __call__(self, irsb_c, past_instructions, future_instructions):\n        self.lift(irsb_c, past_instructions, future_instructions)\n\n    def mark_instruction_start(self):\n        self.irsb_c.imark(self.addr, self.bytewidth, 0)\n\n    def fetch_operands(self):  # pylint: disable=no-self-use\n        \"\"\"\n        Get the operands out of memory or registers\n        Return a tuple of operands for the instruction\n        \"\"\"\n        return ()\n\n    def lift(self, irsb_c: IRSBCustomizer, past_instructions, future_instructions):  # pylint: disable=unused-argument\n        \"\"\"\n        This is the main body of the \"lifting\" for the instruction.\n        This can/should be overridden to provide the general flow of how instructions in your arch work.\n        For example, in MSP430, this is:\n\n        - Figure out what your operands are by parsing the addressing, and load them into temporary registers\n        - Do the actual operation, and commit the result, if needed.\n        - Compute the flags\n        \"\"\"\n        self.irsb_c = irsb_c\n        # Always call this first!\n        self.mark_instruction_start()\n        # Then do the actual stuff.\n        inputs = self.fetch_operands()\n        retval = self.compute_result(*inputs)  # pylint: disable=assignment-from-none\n        if retval is not None:\n            self.commit_result(retval)\n        vals = list(inputs) + [retval]\n        self.compute_flags(*vals)\n\n    def commit_result(self, res):\n        \"\"\"\n        This where the result of the operation is written to a destination.\n        This happens only if compute_result does not return None, and happens before compute_flags is 
called.\n        Override this to specify how to write out the result.\n        The results of fetch_operands can be used to resolve various addressing modes for the write outward.\n        A common pattern is to return a function from fetch_operands which will be called here to perform the write.\n\n        :param args: A tuple of the results of fetch_operands and compute_result\n        \"\"\"\n\n    def compute_result(self, *args):  # pylint: disable=unused-argument,no-self-use\n        \"\"\"\n        This is where the actual operation performed by your instruction, excluding the calculation of flags, should be\n        performed.  Return the VexValue of the \"result\" of the instruction, which may\n        be used to calculate the flags later.\n        For example, for a simple add, with arguments src and dst, you can simply write:\n\n            return src + dst:\n\n        :param args:\n        :return: A VexValue containing the \"result\" of the operation.\n        \"\"\"\n        return None\n\n    def compute_flags(self, *args):\n        \"\"\"\n        Most CPU architectures have \"flags\" that should be computed for many instructions.\n        Override this to specify how that happens.  
One common pattern is to define this method to call specific methods\n        to update each flag, which can then be overridden in the actual classes for each instruction.\n        \"\"\"\n\n    def match_instruction(self, data, bitstrm):  # pylint: disable=unused-argument,no-self-use\n        \"\"\"\n        Override this to extend the parsing functionality.\n        This is great for if your arch has instruction \"formats\" that have an opcode that has to match.\n\n        :param data:\n        :param bitstrm:\n        :return: data\n        \"\"\"\n        return data\n\n    def parse(self, bitstrm):\n        if self.arch.instruction_endness == \"Iend_LE\":\n            # This arch stores its instructions in memory endian-flipped compared to the ISA.\n            # To enable natural lifter-writing, we let the user write them like in the manual, and correct for\n            # endness here.\n            instr_bits = self._load_le_instr(bitstrm, self.bitwidth)\n        else:\n            instr_bits = bitstrm.peek(\"bin:%d\" % self.bitwidth)\n\n        data = {c: \"\" for c in self.bin_format if c in string.ascii_letters}\n        for c, b in zip(self.bin_format, instr_bits):\n            if c in \"01\":\n                if b != c:\n                    raise ParseError(\"Mismatch between format bit %c and instruction bit %c\" % (c, b))\n            elif c in string.ascii_letters:\n                data[c] += b\n            else:\n                raise ValueError(\"Invalid bin_format character %c\" % c)\n\n        # Hook here for extra matching functionality\n        if hasattr(self, \"match_instruction\"):\n            # Should raise if it's not right\n            self.match_instruction(data, bitstrm)\n\n        # Use up the bits once we're sure it's right\n        self.rawbits = bitstrm.read(\"hex:%d\" % self.bitwidth)\n\n        # Hook here for extra parsing functionality (e.g., trailers)\n        if hasattr(self, \"_extra_parsing\"):\n            data = 
self._extra_parsing(data, bitstrm)  # pylint: disable=no-member\n\n        return data\n\n    @property\n    def bytewidth(self):\n        if self.bitwidth % self.arch.byte_width != 0:\n            raise ValueError(\"Instruction is not a multiple of bytes wide!\")\n        return self.bitwidth // self.arch.byte_width\n\n    def disassemble(self):\n        \"\"\"\n        Return the disassembly of this instruction, as a string.\n        Override this in subclasses.\n\n        :return: The address (self.addr), the instruction's name, and a list of its operands, as strings\n        \"\"\"\n        return self.addr, \"UNK\", [self.rawbits]\n\n    # These methods should be called in subclasses to do register and memory operations\n\n    def load(self, addr, ty):\n        \"\"\"\n        Load a value from memory into a VEX temporary register.\n\n        :param addr: The VexValue containing the addr to load from.\n        :param ty: The Type of the resulting data\n        :return: a VexValue\n        \"\"\"\n        rdt = self.irsb_c.load(addr.rdt, ty)\n        return VexValue(self.irsb_c, rdt)\n\n    def constant(self, val, ty):\n        \"\"\"\n        Creates a constant as a VexValue\n\n        :param val: The value, as an integer\n        :param ty: The type of the resulting VexValue\n        :return: a VexValue\n        \"\"\"\n        if isinstance(val, VexValue) and not isinstance(val, IRExpr):\n            raise Exception(\"Constant cannot be made from VexValue or IRExpr\")\n        rdt = self.irsb_c.mkconst(val, ty)\n        return VexValue(self.irsb_c, rdt)\n\n    @staticmethod\n    def _lookup_register(arch, reg):\n        # TODO: This is a hack to make it work with archinfo where we use\n        # register indicies instead of names\n        if isinstance(reg, int):\n            if hasattr(arch, \"register_index\"):\n                reg = arch.register_index[reg]\n            else:\n                reg = arch.register_list[reg].name\n        return 
arch.get_register_offset(reg)\n\n    def get(self, reg, ty):\n        \"\"\"\n        Load a value from a machine register into a VEX temporary register.\n        All values must be loaded out of registers before they can be used with operations, etc\n        and stored back into them when the instruction is over.  See Put().\n\n        :param reg: Register number as an integer, or register string name\n        :param ty: The Type to use.\n        :return: A VexValue of the gotten value.\n        \"\"\"\n        offset = self._lookup_register(self.irsb_c.irsb.arch, reg)\n        if offset == self.irsb_c.irsb.arch.ip_offset:\n            return self.constant(self.addr, ty)\n        rdt = self.irsb_c.rdreg(offset, ty)\n        return VexValue(self.irsb_c, rdt)\n\n    def put(self, val, reg):\n        \"\"\"\n        Puts a value from a VEX temporary register into a machine register.\n        This is how the results of operations done to registers get committed to the machine's state.\n\n        :param val: The VexValue to store (Want to store a constant? 
See Constant() first)\n        :param reg: The integer register number to store into, or register name\n        :return: None\n        \"\"\"\n        offset = self._lookup_register(self.irsb_c.irsb.arch, reg)\n        self.irsb_c.put(val.rdt, offset)\n\n    def put_conditional(self, cond, valiftrue, valiffalse, reg):\n        \"\"\"\n        Like put, except it checks a condition\n        to decide what to put in the destination register.\n\n        :param cond: The VexValue representing the logical expression for the condition\n            (if your expression only has constants, don't use this method!)\n        :param valiftrue: the VexValue to put in reg if cond evals as true\n        :param valiffalse: the VexValue to put in reg if cond evals as false\n        :param reg: The integer register number to store into, or register name\n        :return: None\n        \"\"\"\n\n        val = self.irsb_c.ite(cond.rdt, valiftrue.rdt, valiffalse.rdt)\n        offset = self._lookup_register(self.irsb_c.irsb.arch, reg)\n        self.irsb_c.put(val, offset)\n\n    def store(self, val, addr):\n        \"\"\"\n        Store a VexValue in memory at the specified location.\n\n        :param val: The VexValue of the value to store\n        :param addr: The VexValue of the address to store into\n        :return: None\n        \"\"\"\n        self.irsb_c.store(addr.rdt, val.rdt)\n\n    def jump(self, condition, to_addr, jumpkind=JumpKind.Boring, ip_offset=None):\n        \"\"\"\n        Jump to a specified destination, under the specified condition.\n        Used for branches, jumps, calls, returns, etc.\n\n        :param condition: The VexValue representing the expression for the guard, or None for an unconditional jump\n        :param to_addr: The address to jump to.\n        :param jumpkind: The JumpKind to use.  
See the VEX docs for what these are; you only need them for things\n            aren't normal jumps (e.g., calls, interrupts, program exits, etc etc)\n        :return: None\n        \"\"\"\n        to_addr_ty = None\n        if isinstance(to_addr, VexValue):\n            # Unpack a VV\n            to_addr_rdt = to_addr.rdt\n            to_addr_ty = to_addr.ty\n        elif isinstance(to_addr, int):\n            # Direct jump to an int, make an RdT and Ty\n            to_addr_ty = vex_int_class(self.irsb_c.irsb.arch.bits).type\n            to_addr = self.constant(to_addr, to_addr_ty)  # TODO archinfo may be changing\n            to_addr_rdt = to_addr.rdt\n        elif isinstance(to_addr, RdTmp):\n            # An RdT; just get the Ty of the arch's pointer type\n            to_addr_ty = vex_int_class(self.irsb_c.irsb.arch.bits).type\n            to_addr_rdt = to_addr\n        else:\n            raise TypeError(\"Jump destination has unknown type: \" + repr(type(to_addr)))\n        if not condition:\n            # This is the default exit.\n            self.irsb_c.irsb.jumpkind = jumpkind\n            self.irsb_c.irsb.next = to_addr_rdt\n        else:\n            # add another exit\n            # EDG says: We should make sure folks set ArchXYZ.ip_offset like they're supposed to\n            if ip_offset is None:\n                ip_offset = self.arch.ip_offset\n            assert ip_offset is not None\n\n            negated_condition_rdt = self.ite(condition, self.constant(0, condition.ty), self.constant(1, condition.ty))\n            direct_exit_target = self.constant(self.addr + (self.bitwidth // 8), to_addr_ty)\n            self.irsb_c.add_exit(negated_condition_rdt, direct_exit_target.rdt, jumpkind, ip_offset)\n            self.irsb_c.irsb.jumpkind = jumpkind\n            self.irsb_c.irsb.next = to_addr_rdt\n\n    def ite(self, cond, t, f):\n        return self.irsb_c.ite(cond.rdt, t.rdt, f.rdt)\n\n    def ccall(self, ret_type, func_name, args):\n        \"\"\"\n 
       Creates a CCall operation.\n        A CCall is a procedure that calculates a value at *runtime*, not at lift-time.\n        You can use these for flags, unresolvable jump targets, etc.\n        We caution you to avoid using them when at all possible though.\n\n        :param ret_type: The return type of the CCall\n        :param func_obj: The name of the helper function to call. If you're using angr, this should be added (or\n                         monkeypatched) into ``angr.engines.vex.claripy.ccall``.\n        :param args: List of arguments to the function\n        :return: A VexValue of the result.\n        \"\"\"\n\n        # Check the args to make sure they're the right type\n        list_args = list(args)\n        new_args = []\n        for arg in list_args:\n            if isinstance(arg, VexValue):\n                arg = arg.rdt\n            new_args.append(arg)\n        args = tuple(new_args)\n\n        cc = self.irsb_c.op_ccall(ret_type, func_name, args)\n        return VexValue(self.irsb_c, cc)\n\n    def dirty(self, ret_type, func_name, args) -> VexValue:\n        \"\"\"\n        Creates a dirty call operation.\n\n        These are like ccalls (clean calls) but their implementations are theoretically allowed to read or write to or\n        from any part of the state, making them a nightmare for static analysis to reason about. Avoid their use at all\n        costs.\n\n        :param ret_type:   The return type of the dirty call, or None if the dirty call doesn't return anything.\n        :param func_name:  The name of the helper function to call. 
If you're using angr, this should be added (or\n                           monkeypatched) into ``angr.engines.vex.heavy.dirty``.\n        :param args: List of arguments to the function\n        :return: A VexValue of the result.\n        \"\"\"\n\n        # Check the args to make sure they're the right type\n        list_args = list(args)\n        new_args = []\n        for arg in list_args:\n            if isinstance(arg, VexValue):\n                arg = arg.rdt\n            new_args.append(arg)\n        args = tuple(new_args)\n\n        rdt = self.irsb_c.dirty(ret_type, func_name, args)\n        return VexValue(self.irsb_c, rdt)\n\n    def _load_le_instr(self, bitstream: bitstring.ConstBitStream, numbits: int) -> str:\n        return bitstring.Bits(uint=bitstream.peek(\"uintle:%d\" % numbits), length=numbits).bin\n"
  },
  {
    "path": "pyvex/lifting/util/lifter_helper.py",
    "content": "import logging\nfrom typing import TYPE_CHECKING\n\nimport bitstring\n\nfrom pyvex.const import vex_int_class\nfrom pyvex.errors import LiftingException\nfrom pyvex.lifting.lifter import Lifter\n\nfrom .vex_helper import IRSBCustomizer, JumpKind\n\nif TYPE_CHECKING:\n    from .instr_helper import Instruction\n\nlog = logging.getLogger(__name__)\n\n\ndef is_empty(bitstrm):\n    try:\n        bitstrm.peek(1)\n        return False\n    except bitstring.ReadError:\n        return True\n\n\nclass ParseError(Exception):\n    pass\n\n\nclass GymratLifter(Lifter):\n    \"\"\"\n    This is a base class for lifters that use Gymrat.\n    For most architectures, all you need to do is subclass this, and set the property \"instructions\"\n    to be a list of classes that define each instruction.\n    By default, a lifter will decode instructions by attempting to instantiate every class until one works.\n    This will use an IRSBCustomizer, which will, if it succeeds, add the appropriate VEX instructions to a pyvex IRSB.\n    pyvex, when lifting a block of code for this architecture, will call the method \"lift\", which will produce the IRSB\n    of the lifted code.\n    \"\"\"\n\n    __slots__ = (\n        \"bitstrm\",\n        \"errors\",\n        \"thedata\",\n        \"disassembly\",\n    )\n\n    REQUIRE_DATA_PY = True\n    instrs: list[type[\"Instruction\"]]\n\n    def __init__(self, arch, addr):\n        super().__init__(arch, addr)\n        self.bitstrm = None\n        self.errors = None\n        self.thedata = None\n        self.disassembly = None\n\n    def create_bitstrm(self):\n        self.bitstrm = bitstring.ConstBitStream(bytes=self.thedata)\n\n    def _decode_next_instruction(self, addr):\n        # Try every instruction until one works\n        for possible_instr in self.instrs:\n            try:\n                log.debug(\"Trying %s\", possible_instr.name)\n                return possible_instr(self.bitstrm, self.irsb.arch, addr)\n            # 
a ParserError signals that this instruction did not match\n            # we need to try other instructions, so we ignore this error\n            except ParseError:\n                pass  # l.exception(repr(possible_instr))\n            # if we are out of input, ignore.\n            # there may be other, shorter instructions that still match,\n            # so we continue with the loop\n            except (bitstring.ReadError, bitstring.InterpretError):\n                pass\n\n        # If no instruction matches, log an error\n        errorstr = \"Unknown instruction at bit position %d\" % self.bitstrm.bitpos\n        log.debug(errorstr)\n        log.debug(\"Address: %#08x\" % addr)\n\n    def decode(self):\n        try:\n            self.create_bitstrm()\n            count = 0\n            disas = []\n            addr = self.irsb.addr\n            log.debug(\"Starting block at address: \" + hex(addr))\n            bytepos = self.bitstrm.bytepos\n\n            while not is_empty(self.bitstrm):\n                instr = self._decode_next_instruction(addr)\n                if not instr:\n                    break\n                disas.append(instr)\n                log.debug(\"Matched \" + instr.name)\n                addr += self.bitstrm.bytepos - bytepos\n                bytepos = self.bitstrm.bytepos\n                count += 1\n            return disas\n        except Exception as e:\n            self.errors = str(e)\n            log.exception(f\"Error decoding block at offset {bytepos:#x} (address {addr:#x}):\")\n            raise\n\n    def _lift(self):\n        self.thedata = (\n            self.data[: self.max_bytes]\n            if isinstance(self.data, (bytes, bytearray, memoryview))\n            else self.data[: self.max_bytes].encode()\n        )\n        log.debug(repr(self.thedata))\n        instructions = self.decode()\n\n        if self.disasm:\n            self.disassembly = [instr.disassemble() for instr in instructions]\n        self.irsb.jumpkind 
= JumpKind.Invalid\n        irsb_c = IRSBCustomizer(self.irsb)\n        log.debug(\"Decoding complete.\")\n        for i, instr in enumerate(instructions[: self.max_inst]):\n            log.debug(\"Lifting instruction %s\", instr.name)\n            instr(irsb_c, instructions[:i], instructions[i + 1 :])\n            if irsb_c.irsb.jumpkind != JumpKind.Invalid:\n                break\n            if (i + 1) == self.max_inst:  # if we are on our last iteration\n                instr.jump(None, irsb_c.irsb.addr + irsb_c.irsb.size)\n                break\n        else:\n            if len(irsb_c.irsb.statements) == 0:\n                raise LiftingException(\"Could not decode any instructions\")\n            irsb_c.irsb.jumpkind = JumpKind.NoDecode\n            dst = irsb_c.irsb.addr + irsb_c.irsb.size\n            dst_ty = vex_int_class(irsb_c.irsb.arch.bits).type\n            irsb_c.irsb.next = irsb_c.mkconst(dst, dst_ty)\n        log.debug(str(self.irsb))\n        if self.dump_irsb:\n            self.irsb.pp()\n        return self.irsb\n\n    def pp_disas(self):\n        disasstr = \"\"\n        insts = self.disassemble()\n        for addr, name, args in insts:\n            args_str = \",\".join(str(a) for a in args)\n            disasstr += f\"{addr:#08x}:\\t{name} {args_str}\\n\"\n        print(disasstr)\n\n    def error(self):\n        return self.errors\n\n    def disassemble(self):\n        if self.disassembly is None:\n            self.lift(self.data, disasm=True)\n        return self.disassembly\n"
  },
  {
    "path": "pyvex/lifting/util/syntax_wrapper.py",
    "content": "import functools\nfrom typing import Union\n\nfrom pyvex.const import get_type_size\nfrom pyvex.expr import Const, IRExpr, RdTmp\n\nfrom .vex_helper import IRSBCustomizer, Type\n\n\ndef checkparams(rhstype=None):\n    def decorator(fn):\n        @functools.wraps(fn)\n        def inner_decorator(self, *args, **kwargs):\n            irsb_cs = {a.irsb_c for a in list(args) + [self] if isinstance(a, VexValue)}  # pylint: disable=no-member\n            assert len(irsb_cs) == 1, \"All VexValues must belong to the same irsb_c\"\n            args = list(args)\n            for idx, arg in enumerate(args):\n                if isinstance(arg, int):\n                    thetype = rhstype if rhstype else self.ty\n                    args[idx] = VexValue.Constant(self.irsb_c, arg, thetype)\n                elif not isinstance(arg, VexValue):\n                    raise Exception(\"Cannot convert param %s\" % str(arg))\n            args = tuple(args)\n            return fn(self, *args, **kwargs)\n\n        return inner_decorator\n\n    return decorator\n\n\ndef vvifyresults(f):\n    @functools.wraps(f)\n    def decor(self, *args, **kwargs):\n        returned = f(self, *args, **kwargs)\n        assert isinstance(returned, RdTmp) or isinstance(returned, Const)\n        return VexValue(self.irsb_c, returned)\n\n    return decor\n\n\nclass VexValue:\n    def __init__(self, irsb_c: \"IRSBCustomizer\", rdt: \"Union[RdTmp, Const]\", signed=False):\n        self.irsb_c = irsb_c\n        self.ty = self.irsb_c.get_type(rdt)\n        self.rdt = rdt\n        self.width = get_type_size(self.ty)\n        self._is_signed = signed\n\n    @property\n    def value(self):\n        if isinstance(self.rdt, Const):\n            return self.rdt.con.value\n        else:\n            raise ValueError(\"Non-constant VexValue has no value property\")\n\n    @property\n    def signed(self):\n        return VexValue(self.irsb_c, self.rdt, True)\n\n    @vvifyresults\n    def 
widen_unsigned(self, ty):\n        return self.irsb_c.op_widen_int_unsigned(self.rdt, ty)\n\n    @vvifyresults\n    def cast_to(self, ty, signed=False, high=False):\n        return self.irsb_c.cast_to(self.rdt, ty, signed=signed, high=high)\n\n    @vvifyresults\n    def widen_signed(self, ty):\n        return self.irsb_c.op_widen_int_signed(self.rdt, ty)\n\n    @vvifyresults\n    def narrow_high(self, ty):\n        return self.irsb_c.op_narrow_int(self.rdt, ty, high_half=True)\n\n    @vvifyresults\n    def narrow_low(self, ty):\n        return self.irsb_c.op_narrow_int(self.rdt, ty, high_half=False)\n\n    # TODO at some point extend this to Vex nonconstants\n    def __getitem__(self, idx):\n        def getb(i):\n            return VexValue(self.irsb_c, self.irsb_c.get_bit(self.rdt, i))\n\n        def makeconstant(x):\n            return VexValue.Constant(self.irsb_c, x, Type.int_8).rdt\n\n        if not isinstance(idx, slice):\n            actualindex = slice(idx).indices(self.width)[1]\n            return getb(makeconstant(actualindex))\n        else:\n            return [getb(makeconstant(i)) for i in range(*idx.indices(self.width))]\n\n    def __setitem__(self, idx, bval):\n        setted = self.set_bit(idx, bval)\n        self.__init__(setted.irsb_c, setted.rdt)\n\n    @checkparams(rhstype=Type.int_8)\n    @vvifyresults\n    def set_bit(self, idx, bval):\n        return self.irsb_c.set_bit(self.rdt, idx.rdt, bval.rdt)\n\n    @checkparams()\n    @vvifyresults\n    def set_bits(self, idxsandvals):\n        return self.irsb_c.set_bits(self.rdt, [(i.cast_to(Type.int_8).rdt, b.rdt) for i, b in idxsandvals])\n\n    @checkparams()\n    @vvifyresults\n    def ite(self, iftrue, iffalse):\n        onebitcond = self.cast_to(Type.int_1)\n        return self.irsb_c.ite(onebitcond.rdt, iftrue.rdt, iffalse.rdt)\n\n    @checkparams()\n    @vvifyresults\n    def sar(self, right):\n        \"\"\"\n        `v.sar(r)` should do arithmetic shift right of `v` by `r`\n\n        
:param right:VexValue value to shift by\n        :return: VexValue - result of a shift\n        \"\"\"\n        return self.irsb_c.op_sar(self.rdt, right.rdt)\n\n    @checkparams()\n    @vvifyresults\n    def __add__(self, right):\n        return self.irsb_c.op_add(self.rdt, right.rdt)\n\n    @checkparams()\n    def __radd__(self, left):\n        return self + left\n\n    @checkparams()\n    @vvifyresults\n    def __sub__(self, right):\n        return self.irsb_c.op_sub(self.rdt, right.rdt)\n\n    @checkparams()\n    def __rsub__(self, left):\n        return left - self\n\n    @checkparams()\n    @vvifyresults\n    def __div__(self, right):\n        if self._is_signed:\n            return self.irsb_c.op_sdiv(self.rdt, right.rdt)\n        else:\n            return self.irsb_c.op_udiv(self.rdt, right.rdt)\n\n    @checkparams()\n    def __rdiv__(self, left):\n        return left // self\n\n    @checkparams()\n    def __floordiv__(self, right):  # Note: nonprimitive\n        return self.__div__(right)\n\n    @checkparams()\n    def __rfloordiv__(self, left):\n        return left // self\n\n    @checkparams()\n    def __truediv__(self, right):  # Note: nonprimitive\n        return self / right\n\n    @checkparams()\n    def __rtruediv__(self, left):\n        return left.__truediv__(self)\n\n    @checkparams()\n    @vvifyresults\n    def __and__(self, right):\n        return self.irsb_c.op_and(self.rdt, right.rdt)\n\n    @checkparams()\n    def __rand__(self, left):\n        return left & self\n\n    @checkparams()\n    @vvifyresults\n    def __eq__(self, right):\n        return self.irsb_c.op_cmp_eq(self.rdt, right.rdt)\n\n    @checkparams()\n    @vvifyresults\n    def __ne__(self, other):\n        return self.irsb_c.op_cmp_ne(self.rdt, other.rdt)\n\n    @checkparams()\n    @vvifyresults\n    def __invert__(self):\n        return self.irsb_c.op_not(self.rdt)\n\n    @checkparams()\n    @vvifyresults\n    def __le__(self, right):\n        if self._is_signed:\n            
return self.irsb_c.op_cmp_sle(self.rdt, right.rdt)\n        else:\n            return self.irsb_c.op_cmp_ule(self.rdt, right.rdt)\n\n    @checkparams()\n    @vvifyresults\n    def __gt__(self, other):\n        if self._is_signed:\n            return self.irsb_c.op_cmp_sgt(self.rdt, other.rdt)\n        else:\n            return self.irsb_c.op_cmp_ugt(self.rdt, other.rdt)\n\n    @checkparams()\n    @vvifyresults\n    def __ge__(self, right):\n        if self._is_signed:\n            return self.irsb_c.op_cmp_sge(self.rdt, right.rdt)\n        else:\n            return self.irsb_c.op_cmp_uge(self.rdt, right.rdt)\n\n    @checkparams(rhstype=Type.int_8)\n    @vvifyresults\n    def __lshift__(self, right):  # TODO put better type inference in irsb_c so we can have rlshift\n        \"\"\"\n        logical shift left\n        \"\"\"\n        return self.irsb_c.op_shl(self.rdt, right.rdt)\n\n    @checkparams()\n    @vvifyresults\n    def __lt__(self, right):\n        if self._is_signed:\n            return self.irsb_c.op_cmp_slt(self.rdt, right.rdt)\n        else:\n            return self.irsb_c.op_cmp_ult(self.rdt, right.rdt)\n\n    @checkparams()\n    @vvifyresults\n    def __mod__(self, right):  # Note: nonprimitive\n        return self.irsb_c.op_mod(self.rdt, right.rdt)\n\n    @checkparams()\n    def __rmod__(self, left):\n        return left % self\n\n    @checkparams()\n    @vvifyresults\n    def __mul__(self, right):\n        if self._is_signed:\n            return self.irsb_c.op_smul(self.rdt, right.rdt)\n        else:\n            return self.irsb_c.op_umul(self.rdt, right.rdt)\n\n    @checkparams()\n    def __rmul__(self, left):\n        return left * self\n\n    @checkparams()\n    @vvifyresults\n    def __neg__(self):  # Note: nonprimitive\n        if not self._is_signed:\n            raise Exception(\"Number is unsigned, cannot change sign!\")\n        else:\n            return self.rdt * -1\n\n    @checkparams()\n    @vvifyresults\n    def __or__(self, 
right):\n        return self.irsb_c.op_or(self.rdt, right.rdt)\n\n    def __ror__(self, left):\n        return self | left\n\n    @checkparams()\n    @vvifyresults\n    def __pos__(self):\n        return self\n\n    @checkparams(rhstype=Type.int_8)\n    @vvifyresults\n    def __rshift__(self, right):\n        \"\"\"\n        logical shift right\n        \"\"\"\n        return self.irsb_c.op_shr(self.rdt, right.rdt)\n\n    @checkparams()\n    def __rlshift__(self, left):\n        return left << self\n\n    @checkparams()\n    def __rrshift__(self, left):\n        return left >> self\n\n    @checkparams()\n    @vvifyresults\n    def __xor__(self, right):\n        return self.irsb_c.op_xor(self.rdt, right.rdt)\n\n    def __rxor__(self, left):\n        return self ^ left\n\n    @classmethod\n    def Constant(cls, irsb_c, val, ty):\n        \"\"\"\n        Creates a constant as a VexValue\n        :param irsb_c: The IRSBCustomizer to use\n        :param val: The value, as an integer\n        :param ty: The type of the resulting VexValue\n        :return: a VexValue\n        \"\"\"\n        assert not (isinstance(val, VexValue) or isinstance(val, IRExpr))\n        rdt = irsb_c.mkconst(val, ty)\n        return cls(irsb_c, rdt)\n"
  },
  {
    "path": "pyvex/lifting/util/vex_helper.py",
    "content": "import copy\nimport re\n\nfrom pyvex.const import U1, get_type_size, ty_to_const_class, vex_int_class\nfrom pyvex.enums import IRCallee\nfrom pyvex.expr import ITE, Binop, CCall, Const, Get, Load, RdTmp, Unop\nfrom pyvex.stmt import Dirty, Exit, IMark, NoOp, Put, Store, WrTmp\n\n\nclass JumpKind:\n    Boring = \"Ijk_Boring\"\n    Call = \"Ijk_Call\"\n    Ret = \"Ijk_Ret\"\n    Segfault = \"Ijk_SigSEGV\"\n    Exit = \"Ijk_Exit\"\n    Syscall = \"Ijk_Sys_syscall\"\n    Sysenter = \"Ijk_Sys_sysenter\"\n    Invalid = \"Ijk_INVALID\"\n    NoDecode = \"Ijk_NoDecode\"\n\n\nclass TypeMeta(type):\n    typemeta_re = re.compile(r\"int_(?P<size>\\d+)$\")\n\n    def __getattr__(self, name):\n        match = self.typemeta_re.match(name)\n        if match:\n            width = int(match.group(\"size\"))\n            return vex_int_class(width).type\n        else:\n            return type.__getattr__(name)\n\n\nclass Type(metaclass=TypeMeta):\n    __metaclass__ = TypeMeta\n\n    ieee_float_16 = \"Ity_F16\"\n    ieee_float_32 = \"Ity_F32\"\n    ieee_float_64 = \"Ity_F64\"\n    ieee_float_128 = \"Ity_F128\"\n    decimal_float_32 = \"Ity_D32\"\n    decimal_float_64 = \"Ity_D64\"\n    decimal_float_128 = \"Ity_D128\"\n    simd_vector_128 = \"Ity_V128\"\n    simd_vector_256 = \"Ity_V256\"\n\n\ndef get_op_format_from_const_ty(ty):\n    return ty_to_const_class(ty).op_format\n\n\ndef make_format_op_generator(fmt_string):\n    \"\"\"\n    Return a function which generates an op format (just a string of the vex instruction)\n\n    Functions by formatting the fmt_string with the types of the arguments\n    \"\"\"\n\n    def gen(arg_types):\n        converted_arg_types = list(map(get_op_format_from_const_ty, arg_types))\n        op = fmt_string.format(arg_t=converted_arg_types)\n        return op\n\n    return gen\n\n\ndef mkbinop(fstring):\n    return lambda self, expr_a, expr_b: self.op_binary(make_format_op_generator(fstring))(expr_a, expr_b)\n\n\ndef mkunop(fstring):\n    
return lambda self, expr_a: self.op_unary(make_format_op_generator(fstring))(expr_a)\n\n\ndef mkcmpop(fstring_fragment, signedness=\"\"):\n    def cmpop(self, expr_a, expr_b):\n        ty = self.get_type(expr_a)\n        fstring = f\"Iop_Cmp{fstring_fragment}{{arg_t[0]}}{signedness}\"\n        retval = mkbinop(fstring)(self, expr_a, expr_b)\n        return self.cast_to(retval, ty)\n\n    return cmpop\n\n\nclass IRSBCustomizer:\n    op_add = mkbinop(\"Iop_Add{arg_t[0]}\")\n    op_sub = mkbinop(\"Iop_Sub{arg_t[0]}\")\n    op_umul = mkbinop(\"Iop_Mul{arg_t[0]}\")\n    op_smul = mkbinop(\"Iop_MullS{arg_t[0]}\")\n    op_sdiv = mkbinop(\"Iop_DivS{arg_t[0]}\")\n    op_udiv = mkbinop(\"Iop_DivU{arg_t[0]}\")\n\n    # Custom operation that does not exist in libVEX\n    op_mod = mkbinop(\"Iop_Mod{arg_t[0]}\")\n\n    op_or = mkbinop(\"Iop_Or{arg_t[0]}\")\n    op_and = mkbinop(\"Iop_And{arg_t[0]}\")\n    op_xor = mkbinop(\"Iop_Xor{arg_t[0]}\")\n\n    op_shr = mkbinop(\"Iop_Shr{arg_t[0]}\")  # Shift Right (logical)\n    op_shl = mkbinop(\"Iop_Shl{arg_t[0]}\")  # Shift Left (logical)\n\n    op_sar = mkbinop(\"Iop_Sar{arg_t[0]}\")  # Shift Arithmetic Right operation\n\n    op_not = mkunop(\"Iop_Not{arg_t[0]}\")\n\n    op_cmp_eq = mkcmpop(\"EQ\")\n    op_cmp_ne = mkcmpop(\"NE\")\n    op_cmp_slt = mkcmpop(\"LT\", \"S\")\n    op_cmp_sle = mkcmpop(\"LE\", \"S\")\n    op_cmp_ult = mkcmpop(\"LT\", \"U\")\n    op_cmp_ule = mkcmpop(\"LE\", \"U\")\n    op_cmp_sge = mkcmpop(\"GE\", \"S\")\n    op_cmp_uge = mkcmpop(\"GE\", \"U\")\n    op_cmp_sgt = mkcmpop(\"GT\", \"S\")\n    op_cmp_ugt = mkcmpop(\"GT\", \"U\")\n\n    def __init__(self, irsb):\n        self.arch = irsb.arch\n        self.irsb = irsb\n\n    def get_type(self, rdt):\n        return rdt.result_type(self.irsb.tyenv)\n\n    # Statements (no return value)\n    def _append_stmt(self, stmt):\n        self.irsb.statements += [stmt]\n\n    def imark(self, int_addr, int_length, int_delta=0):\n        self._append_stmt(IMark(int_addr, 
int_length, int_delta))\n\n    def get_reg(self, regname):  # TODO move this into the lifter\n        return self.arch.registers[regname][0]\n\n    def put(self, expr_val, tuple_reg):\n        self._append_stmt(Put(copy.copy(expr_val), tuple_reg))\n\n    def store(self, addr, expr):\n        self._append_stmt(Store(copy.copy(addr), copy.copy(expr), self.arch.memory_endness))\n\n    def noop(self):\n        self._append_stmt(NoOp())\n\n    def add_exit(self, guard, dst, jk, ip):\n        \"\"\"\n        Add an exit out of the middle of an IRSB.\n        (e.g., a conditional jump)\n        :param guard: An expression, the exit is taken if true\n        :param dst: the destination of the exit (a Const)\n        :param jk: the JumpKind of this exit (probably Ijk_Boring)\n        :param ip: The address of this exit's source\n        \"\"\"\n        self.irsb.statements.append(Exit(guard, dst.con, jk, ip))\n\n    # end statements\n\n    def goto(self, addr):\n        self.irsb.next = addr\n        self.irsb.jumpkind = JumpKind.Boring\n\n    def ret(self, addr):\n        self.irsb.next = addr\n        self.irsb.jumpkind = JumpKind.Ret\n\n    def call(self, addr):\n        self.irsb.next = addr\n        self.irsb.jumpkind = JumpKind.Call\n\n    def _add_tmp(self, t):\n        return self.irsb.tyenv.add(t)\n\n    def _rdtmp(self, tmp):\n        return RdTmp.get_instance(tmp)\n\n    def _settmp(self, expr):\n        ty = self.get_type(expr)\n        tmp = self._add_tmp(ty)\n        self._append_stmt(WrTmp(tmp, expr))\n        return self._rdtmp(tmp)\n\n    def rdreg(self, reg, ty):\n        return self._settmp(Get(reg, ty))\n\n    def load(self, addr, ty):\n        return self._settmp(Load(self.arch.memory_endness, ty, copy.copy(addr)))\n\n    def op_ccall(self, retty, funcstr, args):\n        return self._settmp(CCall(retty, IRCallee(len(args), funcstr, 0xFFFF), args))\n\n    def dirty(self, retty, funcstr, args):\n        if retty is None:\n            tmp = 0xFFFFFFFF\n   
     else:\n            tmp = self._add_tmp(retty)\n        self._append_stmt(Dirty(IRCallee(len(args), funcstr, 0xFFFF), Const(U1(1)), args, tmp, None, None, None, None))\n        return self._rdtmp(tmp)\n\n    def ite(self, condrdt, iftruerdt, iffalserdt):\n        return self._settmp(ITE(copy.copy(condrdt), copy.copy(iffalserdt), copy.copy(iftruerdt)))\n\n    def mkconst(self, val, ty):\n        cls = ty_to_const_class(ty)\n        return Const(cls(val))\n\n    # Operations\n    def op_generic(self, Operation, op_generator):\n        def instance(*args):  # Note: The args here are all RdTmps\n            for arg in args:\n                assert isinstance(arg, RdTmp) or isinstance(arg, Const)\n            arg_types = [self.get_type(arg) for arg in args]\n            # two operations should never share the same argument instances, copy them here to ensure that\n            args = [copy.copy(a) for a in args]\n            op = Operation(op_generator(arg_types), args)\n            msg = \"operation needs to be well typed: \" + str(op)\n            assert op.typecheck(self.irsb.tyenv), msg + \"\\ntypes: \" + str(self.irsb.tyenv)\n            return self._settmp(op)\n\n        return instance\n\n    def op_binary(self, op_format_str):\n        return self.op_generic(Binop, op_format_str)\n\n    def op_unary(self, op_format_str):\n        return self.op_generic(Unop, op_format_str)\n\n    def cast_to(self, rdt, tydest, signed=False, high=False):\n        goalwidth = get_type_size(tydest)\n        rdtwidth = self.get_rdt_width(rdt)\n\n        if rdtwidth > goalwidth:\n            return self.op_narrow_int(rdt, tydest, high_half=high)\n        elif rdtwidth < goalwidth:\n            return self.op_widen_int(rdt, tydest, signed=signed)\n        else:\n            return rdt\n\n    def op_to_one_bit(self, rdt):\n        rdtty = self.get_type(rdt)\n        if rdtty not in [Type.int_64, Type.int_32]:\n            rdt = self.op_widen_int_unsigned(rdt, Type.int_32)\n        
onebit = self.op_narrow_int(rdt, Type.int_1)\n        return onebit\n\n    def op_narrow_int(self, rdt, tydest, high_half=False):\n        op_name = \"{op}{high}to{dest}\".format(\n            op=\"Iop_{arg_t[0]}\", high=\"HI\" if high_half else \"\", dest=get_op_format_from_const_ty(tydest)\n        )\n        return self.op_unary(make_format_op_generator(op_name))(rdt)\n\n    def op_widen_int(self, rdt, tydest, signed=False):\n        op_name = \"{op}{sign}to{dest}\".format(\n            op=\"Iop_{arg_t[0]}\", sign=\"S\" if signed else \"U\", dest=get_op_format_from_const_ty(tydest)\n        )\n        return self.op_unary(make_format_op_generator(op_name))(rdt)\n\n    def op_widen_int_signed(self, rdt, tydest):\n        return self.op_widen_int(rdt, tydest, signed=True)\n\n    def op_widen_int_unsigned(self, rdt, tydest):\n        return self.op_widen_int(rdt, tydest, signed=False)\n\n    def get_msb(self, tmp, ty):\n        width = get_type_size(ty)\n        return self.get_bit(tmp, width - 1)\n\n    def get_bit(self, rdt, idx):\n        shifted = self.op_shr(rdt, idx)\n        bit = self.op_extract_lsb(shifted)\n        return bit\n\n    def op_extract_lsb(self, rdt):\n        bitmask = self.mkconst(1, self.get_type(rdt))\n        return self.op_and(bitmask, rdt)\n\n    def set_bit(self, rdt, idx, bval):\n        currbit = self.get_bit(rdt, idx)\n        areequalextrabits = self.op_xor(bval, currbit)\n        one = self.mkconst(1, self.get_type(areequalextrabits))\n        areequal = self.op_and(areequalextrabits, one)\n        shifted = self.op_shl(areequal, idx)\n        return self.op_xor(rdt, shifted)\n\n    def set_bits(self, rdt, idxsandvals):\n        ty = self.get_type(rdt)\n        if all([isinstance(idx, Const) for idx, _ in idxsandvals]):\n            relevantbits = self.mkconst(sum([1 << idx.con.value for idx, _ in idxsandvals]), ty)\n        else:\n            relevantbits = self.mkconst(0, ty)\n            for idx, _ in idxsandvals:\n             
   shifted = self.op_shl(self.mkconst(1, ty), idx)\n                relevantbits = self.op_or(relevantbits, shifted)\n        setto = self.mkconst(0, ty)\n        for idx, bval in idxsandvals:\n            bvalbit = self.op_extract_lsb(bval)\n            shifted = self.op_shl(bvalbit, idx)\n            setto = self.op_or(setto, shifted)\n        shouldflip = self.op_and(self.op_xor(setto, rdt), relevantbits)\n        return self.op_xor(rdt, shouldflip)\n\n    def get_rdt_width(self, rdt):\n        return rdt.result_size(self.irsb.tyenv)\n"
  },
  {
    "path": "pyvex/lifting/zerodivision.py",
    "content": "import copy\n\nfrom pyvex import const, expr, stmt\n\nfrom .post_processor import Postprocessor\n\n\nclass ZeroDivisionPostProcessor(Postprocessor):\n    \"\"\"\n    A postprocessor for adding zero-division checks to VEX.\n\n    For \"div rcx\", will turn:\n\n              00 | ------ IMark(0x8000, 3, 0) ------\n              01 | t0 = GET:I64(rcx)\n              02 | t1 = GET:I64(rax)\n              03 | t2 = GET:I64(rdx)\n              04 | t3 = 64HLto128(t2,t1)\n              05 | t4 = DivModU128to64(t3,t0)\n              06 | t5 = 128to64(t4)\n              07 | PUT(rax) = t5\n              08 | t6 = 128HIto64(t4)\n              09 | PUT(rdx) = t6\n              NEXT: PUT(rip) = 0x0000000000008003; Ijk_Boring\n\n    into:\n\n              00 | ------ IMark(0x8000, 3, 0) ------\n              01 | t0 = GET:I64(rcx)\n              02 | t4 = GET:I64(rax)\n              03 | t5 = GET:I64(rdx)\n              04 | t3 = 64HLto128(t5,t4)\n              05 | t9 = CmpEQ(t0,0x0000000000000000)\n              06 | if (t9) { PUT(pc) = 0x8000; Ijk_SigFPE_IntDiv }\n              07 | t2 = DivModU128to64(t3,t0)\n              08 | t6 = 128to64(t2)\n              09 | PUT(rax) = t6\n              10 | t7 = 128HIto64(t2)\n              11 | PUT(rdx) = t7\n              NEXT: PUT(rip) = 0x0000000000008003; Ijk_Boring\n    \"\"\"\n\n    def postprocess(self):\n        if self.irsb.statements is None:\n            # This is an optimized IRSB. 
We cannot really post-process it.\n            return\n\n        insertions = []\n        last_ip = 0\n        for i, s in enumerate(self.irsb.statements):\n            if s.tag == \"Ist_IMark\":\n                last_ip = s.addr\n            if s.tag == \"Ist_WrTmp\" and s.data.tag == \"Iex_Binop\" and (\"Div\" in s.data.op or \"Mod\" in s.data.op):\n                arg_size = s.data.args[1].result_size(self.irsb.tyenv)\n                cmp_args = [copy.copy(s.data.args[1]), expr.Const(const.vex_int_class(arg_size)(0))]\n                cmp_tmp = self.irsb.tyenv.add(\"Ity_I1\")\n                insertions.append((i, stmt.WrTmp(cmp_tmp, expr.Binop(\"Iop_CmpEQ%d\" % arg_size, cmp_args))))\n                insertions.append(\n                    (\n                        i,\n                        stmt.Exit(\n                            expr.RdTmp.get_instance(cmp_tmp),\n                            const.vex_int_class(self.irsb.arch.bits)(last_ip),\n                            \"Ijk_SigFPE_IntDiv\",\n                            self.irsb.offsIP,\n                        ),\n                    )\n                )\n\n        for i, s in reversed(insertions):\n            self.irsb.statements.insert(i, s)\n"
  },
  {
    "path": "pyvex/native.py",
    "content": "import getpass\nimport hashlib\nimport importlib.resources\nimport os\nimport pickle\nimport sys\nimport tempfile\nfrom typing import Any\n\nimport cffi\n\nfrom .vex_ffi import ffi_str as _ffi_str\n\nffi = cffi.FFI()\n\n\ndef _parse_ffi_str():\n    hash_ = hashlib.md5(_ffi_str.encode(\"utf-8\")).hexdigest()\n    cache_location = os.path.join(tempfile.gettempdir(), f\"pyvex_ffi_parser_cache.{getpass.getuser()}.{hash_}\")\n\n    if os.path.isfile(cache_location):\n        # load the cache\n        with open(cache_location, \"rb\") as f:\n            cache = pickle.loads(f.read())\n        ffi._parser._declarations = cache[\"_declarations\"]\n        ffi._parser._int_constants = cache[\"_int_constants\"]\n    else:\n        ffi.cdef(_ffi_str)\n        # cache the result\n        cache = {\n            \"_declarations\": ffi._parser._declarations,\n            \"_int_constants\": ffi._parser._int_constants,\n        }\n        # atomically write cache\n        with tempfile.NamedTemporaryFile(delete=False) as temp_file:\n            temp_file.write(pickle.dumps(cache))\n            temp_file_name = temp_file.name\n        os.replace(temp_file_name, cache_location)\n\n\ndef _find_c_lib():\n    # Load the c library for calling into VEX\n    if sys.platform in (\"win32\", \"cygwin\"):\n        library_file = \"pyvex.dll\"\n    elif sys.platform == \"darwin\":\n        library_file = \"libpyvex.dylib\"\n    else:\n        library_file = \"libpyvex.so\"\n\n    pyvex_path = str(importlib.resources.files(\"pyvex\") / \"lib\" / library_file)\n    # parse _ffi_str and use cache if possible\n    _parse_ffi_str()\n    # RTLD_GLOBAL used for sim_unicorn.so\n    lib = ffi.dlopen(pyvex_path)\n\n    if not lib.vex_init():\n        raise ImportError(\"libvex failed to initialize\")\n    # this looks up all the definitions (wtf)\n    dir(lib)\n    return lib\n\n\npvc: Any = _find_c_lib()  # This should be properly typed, but this seems non trivial\n"
  },
  {
    "path": "pyvex/py.typed",
    "content": "partial\n"
  },
  {
    "path": "pyvex/stmt.py",
    "content": "from __future__ import annotations\n\nimport logging\nfrom collections.abc import Iterator\nfrom typing import TYPE_CHECKING\n\nfrom . import expr\nfrom .const import IRConst\nfrom .enums import IRCallee, IRRegArray, VEXObject, get_enum_from_int, get_int_from_enum\nfrom .errors import PyVEXError\nfrom .expr import Const, Get, IRExpr\nfrom .native import ffi, pvc\n\nif TYPE_CHECKING:\n    from .block import IRTypeEnv\n\nlog = logging.getLogger(\"pyvex.stmt\")\n\n\nclass IRStmt(VEXObject):\n    \"\"\"\n    IR statements in VEX represents operations with side-effects.\n    \"\"\"\n\n    tag: str\n    tag_int = 0  # set automatically at bottom of file\n\n    __slots__ = []\n\n    def pp(self):\n        print(str(self))\n\n    @property\n    def child_expressions(self) -> Iterator[IRExpr]:\n        for k in self.__slots__:\n            v = getattr(self, k)\n            if isinstance(v, IRExpr):\n                # return itself\n                yield v\n                # return all the child expressions\n                yield from v.child_expressions\n\n    # ???\n    @property\n    def expressions(self):\n        return self.child_expressions\n\n    @property\n    def constants(self):\n        return sum((e.constants for e in self.expressions), [])\n\n    @staticmethod\n    def _from_c(c_stmt):\n        if c_stmt[0] == ffi.NULL:\n            return None\n\n        try:\n            stmt_class = enum_to_stmt_class(c_stmt.tag)\n        except KeyError:\n            raise PyVEXError(\"Unknown/unsupported IRStmtTag %s.\\n\" % get_enum_from_int(c_stmt.tag))\n        return stmt_class._from_c(c_stmt)\n\n    def typecheck(self, tyenv: IRTypeEnv) -> bool:  # pylint: disable=unused-argument,no-self-use\n        return True\n\n    def replace_expression(self, replacements):\n        \"\"\"\n        Replace child expressions in-place.\n\n        :param Dict[IRExpr, IRExpr] replacements:  A mapping from expression-to-find to expression-to-replace-with\n        
:return:                    None\n        \"\"\"\n\n        for k in self.__slots__:\n            v = getattr(self, k)\n            if isinstance(v, IRExpr) and v in replacements:\n                setattr(self, k, replacements.get(v))\n            elif isinstance(v, IRExpr):\n                v.replace_expression(replacements)\n            elif type(v) is tuple:\n                # Rebuild the tuple\n                _lst = []\n                replaced = False\n                for expr_ in v:\n                    if isinstance(expr_, IRExpr) and expr_ in replacements:\n                        _lst.append(replacements.get(expr_))\n                        replaced = True\n                    else:\n                        _lst.append(expr_)\n                if replaced:\n                    setattr(self, k, tuple(_lst))\n\n    def __str__(self):\n        return self.pp_str(None, None, None)\n\n    def pp_str(self, reg_name=None, arch=None, tyenv=None) -> str:\n        raise NotImplementedError()\n\n\nclass NoOp(IRStmt):\n    \"\"\"\n    A no-operation statement. It is usually the result of an IR optimization.\n    \"\"\"\n\n    __slots__ = []\n\n    tag = \"Ist_NoOp\"\n\n    def pp_str(self, reg_name=None, arch=None, tyenv=None):\n        return \"IR-NoOp\"\n\n    @staticmethod\n    def _from_c(c_stmt):\n        return NoOp()\n\n\nclass IMark(IRStmt):\n    \"\"\"\n    An instruction mark. It marks the start of the statements that represent a single machine instruction (the end of\n    those statements is marked by the next IMark or the end of the IRSB).  
Contains the address and length of the\n    instruction.\n    \"\"\"\n\n    __slots__ = [\"addr\", \"len\", \"delta\"]\n\n    tag = \"Ist_IMark\"\n\n    def __init__(self, addr: int, length: int, delta: int):\n        self.addr = addr\n        self.len = length\n        self.delta = delta\n\n    def pp_str(self, reg_name=None, arch=None, tyenv=None):\n        return \"------ IMark(0x%x, %d, %d) ------\" % (self.addr, self.len, self.delta)\n\n    @staticmethod\n    def _from_c(c_stmt):\n        return IMark(c_stmt.Ist.IMark.addr, c_stmt.Ist.IMark.len, c_stmt.Ist.IMark.delta)\n\n\nclass AbiHint(IRStmt):\n    \"\"\"\n    An ABI hint, provides specific information about this platform's ABI.\n    \"\"\"\n\n    __slots__ = [\"base\", \"len\", \"nia\"]\n\n    tag = \"Ist_AbiHint\"\n\n    def __init__(self, base, length, nia):\n        self.base = base\n        self.len = length\n        self.nia = nia\n\n    def pp_str(self, reg_name=None, arch=None, tyenv=None):\n        return \"====== AbiHint(0x%s, %d, %s) ======\" % (self.base, self.len, self.nia)\n\n    @staticmethod\n    def _from_c(c_stmt):\n        return AbiHint(\n            IRExpr._from_c(c_stmt.Ist.AbiHint.base), c_stmt.Ist.AbiHint.len, IRExpr._from_c(c_stmt.Ist.AbiHint.nia)\n        )\n\n\nclass Put(IRStmt):\n    \"\"\"\n    Write to a guest register, at a fixed offset in the guest state.\n    \"\"\"\n\n    __slots__ = [\"data\", \"offset\"]\n\n    tag = \"Ist_Put\"\n\n    def __init__(self, data: IRExpr, offset: int):\n        self.data = data\n        self.offset = offset\n\n    ## TODO: Check if result_size and arch are available before looking of arch register name\n    def pp_str(self, reg_name=None, arch=None, tyenv=None):\n        if arch is not None and tyenv is not None:\n            reg_name = arch.translate_register_name(self.offset, self.data.result_size(tyenv) // 8)\n\n        if reg_name is not None:\n            return f\"PUT({reg_name}) = {self.data}\"\n        else:\n            return 
f\"PUT(offset={self.offset}) = {self.data}\"\n\n    @staticmethod\n    def _from_c(c_stmt):\n        return Put(IRExpr._from_c(c_stmt.Ist.Put.data), c_stmt.Ist.Put.offset)\n\n    def typecheck(self, tyenv):\n        return self.data.typecheck(tyenv)\n\n\nclass PutI(IRStmt):\n    \"\"\"\n    Write to a guest register, at a non-fixed offset in the guest state.\n    \"\"\"\n\n    __slots__ = [\"descr\", \"ix\", \"data\", \"bias\"]\n\n    tag = \"Ist_PutI\"\n\n    def __init__(self, descr, ix, data, bias):\n        self.descr = descr\n        self.ix = ix\n        self.data = data\n        self.bias = bias\n\n    def pp_str(self, reg_name=None, arch=None, tyenv=None):\n        return \"PutI(%s)[%s,%d] = %s\" % (self.descr, self.ix, self.bias, self.data)\n\n    @staticmethod\n    def _from_c(c_stmt):\n        return PutI(\n            IRRegArray._from_c(c_stmt.Ist.PutI.details.descr),\n            IRExpr._from_c(c_stmt.Ist.PutI.details.ix),\n            IRExpr._from_c(c_stmt.Ist.PutI.details.data),\n            c_stmt.Ist.PutI.details.bias,\n        )\n\n    def typecheck(self, tyenv):\n        dataty = self.data.typecheck(tyenv)\n        if dataty is None:\n            return False\n        if dataty != self.descr.elemTy:\n            log.debug(\"Expression doesn't match RegArray type\")\n            return False\n        return True\n\n\nclass WrTmp(IRStmt):\n    \"\"\"\n    Assign a value to a temporary.  Note that SSA rules require each tmp is only assigned to once.  
IR sanity checking\n    will reject any block containing a temporary which is not assigned to exactly once.\n    \"\"\"\n\n    __slots__ = [\"data\", \"tmp\"]\n\n    tag = \"Ist_WrTmp\"\n\n    def __init__(self, tmp, data: IRExpr):\n        self.tmp = tmp\n        self.data = data\n\n    def pp_str(self, reg_name=None, arch=None, tyenv=None):\n        # Support for named register in string representation of expr.Get\n\n        if arch is not None and tyenv is not None and isinstance(self.data, Get):\n            reg_name = arch.translate_register_name(self.data.offset, self.data.result_size(tyenv) // 8)\n\n        if reg_name is not None and isinstance(self.data, expr.Get):\n            return \"t%d = %s\" % (self.tmp, self.data.pp_str_with_name(reg_name))\n        else:\n            return \"t%d = %s\" % (self.tmp, self.data)\n\n    @staticmethod\n    def _from_c(c_stmt):\n        return WrTmp(c_stmt.Ist.WrTmp.tmp, IRExpr._from_c(c_stmt.Ist.WrTmp.data))\n\n    def typecheck(self, tyenv):\n        dataty = self.data.typecheck(tyenv)\n        if dataty is None:\n            return False\n        if dataty != tyenv.lookup(self.tmp):\n            log.debug(\"Expression doesn't match tmp type\")\n            return False\n        return True\n\n\nclass Store(IRStmt):\n    \"\"\"\n    Write a value to memory..\n    \"\"\"\n\n    __slots__ = [\"addr\", \"data\", \"end\"]\n\n    tag = \"Ist_Store\"\n\n    def __init__(self, addr: IRExpr, data: IRExpr, end: str):\n        self.addr = addr\n        self.data = data\n        self.end = end\n\n    @property\n    def endness(self):\n        return self.end\n\n    def pp_str(self, reg_name=None, arch=None, tyenv=None):\n        return f\"ST{self.endness[-2:].lower()}({self.addr}) = {self.data}\"\n\n    @staticmethod\n    def _from_c(c_stmt):\n        return Store(\n            IRExpr._from_c(c_stmt.Ist.Store.addr),\n            IRExpr._from_c(c_stmt.Ist.Store.data),\n            get_enum_from_int(c_stmt.Ist.Store.end),\n        
)\n\n    def typecheck(self, tyenv):\n        dataty = self.data.typecheck(tyenv)\n        if dataty is None:\n            return False\n        addrty = self.addr.typecheck(tyenv)\n        if addrty is None:\n            return False\n        if addrty != tyenv.wordty:\n            log.debug(\"addr must be full word for arch\")\n            return False\n        if self.end not in (\"Iend_LE\", \"Iend_BE\"):\n            log.debug(\"invalid endness enum\")\n            return False\n        return True\n\n\nclass CAS(IRStmt):\n    \"\"\"\n    an atomic compare-and-swap operation.\n    \"\"\"\n\n    __slots__ = [\"addr\", \"dataLo\", \"dataHi\", \"expdLo\", \"expdHi\", \"oldLo\", \"oldHi\", \"end\"]\n\n    tag = \"Ist_CAS\"\n\n    def __init__(self, addr, dataLo, dataHi, expdLo, expdHi, oldLo, oldHi, end):\n        self.addr = addr\n        self.dataLo = dataLo\n        self.dataHi = dataHi\n        self.expdLo = expdLo\n        self.expdHi = expdHi\n        self.oldLo = oldLo\n        self.oldHi = oldHi\n        self.end = end\n\n    @property\n    def endness(self):\n        return self.end\n\n    def pp_str(self, reg_name=None, arch=None, tyenv=None):\n        return \"t({},{}) = CAS{}({} :: ({},{})->({},{}))\".format(\n            self.oldLo, self.oldHi, self.end[-2:].lower(), self.addr, self.expdLo, self.expdHi, self.dataLo, self.dataHi\n        )\n\n    @staticmethod\n    def _from_c(c_stmt):\n        return CAS(\n            IRExpr._from_c(c_stmt.Ist.CAS.details.addr),\n            IRExpr._from_c(c_stmt.Ist.CAS.details.dataLo),\n            IRExpr._from_c(c_stmt.Ist.CAS.details.dataHi),\n            IRExpr._from_c(c_stmt.Ist.CAS.details.expdLo),\n            IRExpr._from_c(c_stmt.Ist.CAS.details.expdHi),\n            c_stmt.Ist.CAS.details.oldLo,\n            c_stmt.Ist.CAS.details.oldHi,\n            get_enum_from_int(c_stmt.Ist.CAS.details.end),\n        )\n\n    def typecheck(self, tyenv):\n        addrty = self.addr.typecheck(tyenv)\n        if addrty is 
None:\n            return False\n        if addrty != tyenv.wordty:\n            log.debug(\"addr must be full word for arch\")\n            return False\n        if self.end not in (\"Iend_LE\", \"Iend_BE\"):\n            log.debug(\"invalid endness enum\")\n            return False\n\n        if self.oldHi == 0xFFFFFFFF:\n            # single-element case\n            if self.expdHi is not None or self.dataHi is not None:\n                log.debug(\"expdHi and dataHi must be None\")\n                return False\n            expdLoTy = self.expdLo.typecheck(tyenv)\n            dataLoTy = self.dataLo.typecheck(tyenv)\n            if expdLoTy is None or dataLoTy is None:\n                return False\n            if tyenv.lookup(self.oldLo) != expdLoTy or expdLoTy != dataLoTy:\n                log.debug(\"oldLo, expdL, dataLo must all have the same type\")\n                return False\n        else:\n            # double-element case\n            expdLoTy = self.expdLo.typecheck(tyenv)\n            dataLoTy = self.dataLo.typecheck(tyenv)\n            expdHiTy = self.expdHi.typecheck(tyenv)\n            dataHiTy = self.dataHi.typecheck(tyenv)\n            if expdLoTy is None or dataLoTy is None or expdHiTy is None or dataHiTy is None:\n                return False\n            if (\n                tyenv.lookup(self.oldLo) != expdLoTy\n                or expdLoTy != dataLoTy\n                or tyenv.lookup(self.oldHi) != expdHiTy\n                or expdHiTy != dataHiTy\n                or expdLoTy != expdHiTy\n            ):\n                log.debug(\"oldLo, expdLo, dataLo, oldHi, expdHi, dataHi must all have the same type\")\n                return False\n\n        return True\n\n\nclass LLSC(IRStmt):\n    \"\"\"\n    Either Load-Linked or Store-Conditional, depending on STOREDATA. 
If STOREDATA is NULL then this is a Load-Linked,\n    else it is a Store-Conditional.\n    \"\"\"\n\n    __slots__ = [\"addr\", \"storedata\", \"result\", \"end\"]\n\n    tag = \"Ist_LLSC\"\n\n    def __init__(self, addr: IRExpr, storedata: IRExpr, result: int, end: str):\n        self.addr = addr\n        self.storedata = storedata\n        self.result = result\n        self.end = end\n\n    @property\n    def endness(self):\n        return self.end\n\n    def pp_str(self, reg_name=None, arch=None, tyenv=None):\n        if self.storedata is None:\n            return \"t%d = LD%s-Linked(%s)\" % (self.result, self.end[-2:].lower(), self.addr)\n        else:\n            return \"t%d = ( ST%s-Cond(%s) = %s )\" % (self.result, self.end[-2:].lower(), self.addr, self.storedata)\n\n    @staticmethod\n    def _from_c(c_stmt):\n        return LLSC(\n            IRExpr._from_c(c_stmt.Ist.LLSC.addr),\n            IRExpr._from_c(c_stmt.Ist.LLSC.storedata),\n            c_stmt.Ist.LLSC.result,\n            get_enum_from_int(c_stmt.Ist.LLSC.end),\n        )\n\n    def typecheck(self, tyenv):\n        addrty = self.addr.typecheck(tyenv)\n        if addrty is None:\n            return False\n        if addrty != tyenv.wordty:\n            log.debug(\"addr must be full word for arch\")\n            return False\n        if self.end not in (\"Iend_LE\", \"Iend_BE\"):\n            log.debug(\"invalid endness enum\")\n            return False\n\n        if self.storedata is not None:\n            # load-linked\n            storety = self.storedata.typecheck(tyenv)\n            if storety is None:\n                return False\n\n            if tyenv.lookup(self.result) != \"Ity_I1\":\n                log.debug(\"result tmp must be Ity_I1\")\n                return False\n\n        return True\n\n\nclass MBE(IRStmt):\n    __slots__ = [\"event\"]\n\n    tag = \"Ist_MBE\"\n\n    def __init__(self, event):\n        self.event = event\n\n    def pp_str(self, reg_name=None, arch=None, 
tyenv=None):\n        return \"MBusEvent-\" + self.event\n\n    @staticmethod\n    def _from_c(c_stmt):\n        return MBE(get_enum_from_int(c_stmt.Ist.MBE.event))\n\n\nclass Dirty(IRStmt):\n    __slots__ = [\"cee\", \"guard\", \"args\", \"tmp\", \"mFx\", \"mAddr\", \"mSize\", \"nFxState\"]\n\n    tag = \"Ist_Dirty\"\n\n    def __init__(self, cee, guard, args, tmp, mFx, mAddr, mSize, nFxState):\n        self.cee = cee\n        self.guard = guard\n        self.args = tuple(args)\n        self.tmp = tmp\n        self.mFx = mFx\n        self.mAddr = mAddr\n        self.mSize = mSize\n        self.nFxState = nFxState\n\n    def pp_str(self, reg_name=None, arch=None, tyenv=None):\n        return \"t{} = DIRTY {} {} ::: {}({})\".format(\n            self.tmp, self.guard, \"TODO(effects)\", self.cee, \",\".join(str(a) for a in self.args)\n        )\n\n    @property\n    def child_expressions(self):\n        expressions = sum((a.child_expressions for a in self.args), [])\n        expressions.extend(self.args)\n        expressions.append(self.guard)\n        expressions.extend(self.guard.child_expressions)\n        return expressions\n\n    @staticmethod\n    def _from_c(c_stmt):\n        args = []\n        for i in range(20):\n            a = c_stmt.Ist.Dirty.details.args[i]\n            if a == ffi.NULL:\n                break\n\n            args.append(IRExpr._from_c(a))\n\n        return Dirty(\n            IRCallee._from_c(c_stmt.Ist.Dirty.details.cee),\n            IRExpr._from_c(c_stmt.Ist.Dirty.details.guard),\n            tuple(args),\n            c_stmt.Ist.Dirty.details.tmp,\n            get_enum_from_int(c_stmt.Ist.Dirty.details.mFx),\n            IRExpr._from_c(c_stmt.Ist.Dirty.details.mAddr),\n            c_stmt.Ist.Dirty.details.mSize,\n            c_stmt.Ist.Dirty.details.nFxState,\n        )\n\n\nclass Exit(IRStmt):\n    \"\"\"\n    A conditional exit from the middle of an IRSB.\n    \"\"\"\n\n    __slots__ = [\"guard\", \"dst\", \"offsIP\", \"jk\"]\n\n    
tag = \"Ist_Exit\"\n\n    def __init__(self, guard: IRExpr, dst: IRConst, jk: str, offsIP: int):\n        self.guard = guard\n        self.dst = dst\n        self.offsIP = offsIP\n        self.jk = jk\n\n    @property\n    def jumpkind(self):\n        return self.jk\n\n    def pp_str(self, reg_name=None, arch=None, tyenv=None):\n        if arch is not None and tyenv is not None:\n            reg_name = arch.translate_register_name(self.offsIP, arch.bits // 8)\n\n        if reg_name is None:\n            return \"if (%s) { PUT(offset=%d) = %#x; %s }\" % (self.guard, self.offsIP, self.dst.value, self.jumpkind)\n        else:\n            return f\"if ({self.guard}) {{ PUT({reg_name}) = {self.dst.value:#x}; {self.jumpkind} }}\"\n\n    @property\n    def child_expressions(self):\n        return [self.guard] + self.guard.child_expressions + [Const(self.dst)]\n\n    @staticmethod\n    def _from_c(c_stmt):\n        return Exit(\n            IRExpr._from_c(c_stmt.Ist.Exit.guard),\n            IRConst._from_c(c_stmt.Ist.Exit.dst),\n            get_enum_from_int(c_stmt.Ist.Exit.jk),\n            c_stmt.Ist.Exit.offsIP,\n        )\n\n    def typecheck(self, tyenv):\n        if not self.jk.startswith(\"Ijk_\"):\n            log.debug(\"Jumpkind is not a jumpkind enum\")\n            return False\n        guardty = self.guard.typecheck(tyenv)\n        if guardty is None:\n            return False\n        if guardty != \"Ity_I1\":\n            log.debug(\"guard must be Ity_I1\")\n            return False\n        return True\n\n\nclass LoadG(IRStmt):\n    \"\"\"\n    A guarded load.\n    \"\"\"\n\n    __slots__ = [\"addr\", \"alt\", \"guard\", \"dst\", \"cvt\", \"end\", \"cvt_types\"]\n\n    tag = \"Ist_LoadG\"\n\n    def __init__(self, end: str, cvt: str, dst: int, addr: IRExpr, alt: IRExpr, guard: IRExpr):\n        self.addr = addr\n        self.alt = alt\n        self.guard = guard\n        self.dst = dst\n        self.cvt = cvt\n        self.end = end\n\n        type_in = 
ffi.new(\"IRType *\")  # TODO separate this from the pyvex C implementation\n        type_out = ffi.new(\"IRType *\")\n        pvc.typeOfIRLoadGOp(get_int_from_enum(self.cvt), type_out, type_in)\n        type_in = ffi.cast(\"int *\", type_in)[0]\n        type_out = ffi.cast(\"int *\", type_out)[0]\n        self.cvt_types = (get_enum_from_int(type_in), get_enum_from_int(type_out))\n\n    @property\n    def endness(self):\n        return self.end\n\n    def pp_str(self, reg_name=None, arch=None, tyenv=None):\n        return \"t%d = if (%s) %s(LD%s(%s)) else %s\" % (\n            self.dst,\n            self.guard,\n            self.cvt,\n            self.end[-2:].lower(),\n            self.addr,\n            self.alt,\n        )\n\n    @staticmethod\n    def _from_c(c_stmt):\n        return LoadG(\n            get_enum_from_int(c_stmt.Ist.LoadG.details.end),\n            get_enum_from_int(c_stmt.Ist.LoadG.details.cvt),\n            c_stmt.Ist.LoadG.details.dst,\n            IRExpr._from_c(c_stmt.Ist.LoadG.details.addr),\n            IRExpr._from_c(c_stmt.Ist.LoadG.details.alt),\n            IRExpr._from_c(c_stmt.Ist.LoadG.details.guard),\n        )\n\n    def typecheck(self, tyenv):\n        addrty = self.addr.typecheck(tyenv)\n        if addrty is None:\n            return False\n        if addrty != tyenv.wordty:\n            log.debug(\"addr must be full word for arch\")\n            return False\n        if self.end not in (\"Iend_LE\", \"Iend_BE\"):\n            log.debug(\"invalid endness enum\")\n            return False\n\n        dstty = tyenv.lookup(self.dst)\n        guardty = self.guard.typecheck(tyenv)\n        altty = self.alt.typecheck(tyenv)\n\n        if guardty is None or altty is None:\n            return False\n        if dstty != \"Ity_I32\" or altty != \"Ity_I32\":\n            log.debug(\"dst and alt must be Ity_I32\")\n            return False\n        if guardty != \"Ity_I1\":\n            log.debug(\"guard must be Ity_I1\")\n            
return False\n        if not self.cvt.startswith(\"ILGop_\"):\n            log.debug(\"Invalid cvt enum\")\n            return False\n        return True\n\n\nclass StoreG(IRStmt):\n    \"\"\"\n    A guarded store.\n    \"\"\"\n\n    __slots__ = [\"addr\", \"data\", \"guard\", \"end\"]\n\n    tag = \"Ist_StoreG\"\n\n    def __init__(self, end, addr, data, guard):\n        self.addr = addr\n        self.data = data\n        self.guard = guard\n        self.end = end\n\n    @property\n    def endness(self):\n        return self.end\n\n    def pp_str(self, reg_name=None, arch=None, tyenv=None):\n        return f\"if ({self.guard}) ST{self.end[-2:].lower()}({self.addr}) = {self.data}\"\n\n    @staticmethod\n    def _from_c(c_stmt):\n        return StoreG(\n            get_enum_from_int(c_stmt.Ist.StoreG.details.end),\n            IRExpr._from_c(c_stmt.Ist.StoreG.details.addr),\n            IRExpr._from_c(c_stmt.Ist.StoreG.details.data),\n            IRExpr._from_c(c_stmt.Ist.StoreG.details.guard),\n        )\n\n    def typecheck(self, tyenv):\n        addrty = self.addr.typecheck(tyenv)\n        if addrty is None:\n            return False\n        if addrty != tyenv.wordty:\n            log.debug(\"addr must be full word for arch\")\n            return False\n        if self.end not in (\"Iend_LE\", \"Iend_BE\"):\n            log.debug(\"invalid endness enum\")\n            return False\n\n        guardty = self.guard.typecheck(tyenv)\n        dataty = self.data.typecheck(tyenv)\n\n        if guardty is None or dataty is None:\n            return False\n        if guardty != \"Ity_I1\":\n            log.debug(\"guard must be Ity_I1\")\n            return False\n        return True\n\n\n_globals = globals().copy()\n#\n# Mapping from tag strings/enums to IRStmt classes\n#\ntag_to_stmt_mapping = {}\nenum_to_stmt_mapping = {}\ntag_count = 0\ncls = None\nfor cls in _globals.values():\n    if type(cls) is type and issubclass(cls, IRStmt) and cls is not IRStmt:\n        
tag_to_stmt_mapping[cls.tag] = cls\n        enum_to_stmt_mapping[get_int_from_enum(cls.tag)] = cls\n        cls.tag_int = tag_count\n        tag_count += 1\ndel cls\n\n\ndef tag_to_stmt_class(tag):\n    try:\n        return tag_to_stmt_mapping[tag]\n    except KeyError:\n        raise KeyError(\"No statement class for tag %s.\" % tag)\n\n\ndef enum_to_stmt_class(tag_enum):\n    try:\n        return enum_to_stmt_mapping[tag_enum]\n    except KeyError:\n        raise KeyError(\"No statement class for tag %s.\" % get_enum_from_int(tag_enum))\n"
  },
  {
    "path": "pyvex/types.py",
    "content": "from typing import TYPE_CHECKING, Any, Protocol, Union, runtime_checkable\n\nfrom cffi.api import FFI\n\n\nclass Register(Protocol):\n    \"\"\"\n    A register. Pyvex should probably not have this dependency.\n    \"\"\"\n\n    name: str\n\n\nclass Arch(Protocol):\n    \"\"\"\n    An architecture description.\n    \"\"\"\n\n    name: str\n    ip_offset: int\n    bits: int\n    instruction_endness: str\n    memory_endness: str\n    byte_width: int\n    register_list: list[Register]\n    registers: dict[str, tuple[int, int]]\n\n    def translate_register_name(self, offset: int, size: int | None = None) -> str | None: ...\n\n    def get_register_offset(self, name: str) -> int: ...\n\n\n@runtime_checkable\nclass LibvexArch(Protocol):\n    \"\"\"\n    The description for an architecture that is usable with libvex\n    \"\"\"\n\n    vex_arch: str\n    vex_archinfo: dict[str, Any]\n\n\nPyLiftSource = Union[bytes, bytearray, memoryview]\nif TYPE_CHECKING:\n    CLiftSource = FFI.CData\nelse:\n    CLiftSource = None\nLiftSource = Union[PyLiftSource, CLiftSource]\n"
  },
  {
    "path": "pyvex/utils.py",
    "content": "import struct\nfrom collections.abc import Callable\nfrom typing import Any\n\ntry:\n    import _md5 as md5lib\nexcept ImportError:\n    import hashlib as md5lib\n\n\nmd5_unpacker = struct.Struct(\"4I\")\n\n\ndef stable_hash(t: tuple) -> int:\n    cnt = _dump_tuple(t)\n    hd = md5lib.md5(cnt).digest()\n    return md5_unpacker.unpack(hd)[0]  # 32 bits\n\n\ndef _dump_tuple(t: tuple) -> bytes:\n    cnt = b\"\"\n    for item in t:\n        if item is not None:\n            type_ = type(item)\n            if type_ in _DUMP_BY_TYPE:\n                cnt += _DUMP_BY_TYPE[type_](item)\n            else:\n                cnt += struct.pack(\"<Q\", hash(item) & 0xFFFF_FFFF_FFFF_FFFF)\n        cnt += b\"\\xf0\"\n    return cnt\n\n\ndef _dump_str(t: str) -> bytes:\n    return t.encode(\"ascii\")\n\n\ndef _dump_int(t: int) -> bytes:\n    prefix = b\"\" if t >= 0 else b\"-\"\n    t = abs(t)\n    if t <= 0xFFFF:\n        return prefix + struct.pack(\"<H\", t)\n    elif t <= 0xFFFF_FFFF:\n        return prefix + struct.pack(\"<I\", t)\n    elif t <= 0xFFFF_FFFF_FFFF_FFFF:\n        return prefix + struct.pack(\"<Q\", t)\n    else:\n        cnt = b\"\"\n        while t > 0:\n            cnt += _dump_int(t & 0xFFFF_FFFF_FFFF_FFFF)\n            t >>= 64\n        return prefix + cnt\n\n\ndef _dump_type(t: type) -> bytes:\n    return t.__name__.encode(\"ascii\")\n\n\n_DUMP_BY_TYPE: dict[type, Callable[[Any], bytes]] = {\n    tuple: _dump_tuple,\n    str: _dump_str,\n    int: _dump_int,\n    type: _dump_type,\n}\n"
  },
  {
    "path": "pyvex_c/LICENSE",
    "content": "                    GNU GENERAL PUBLIC LICENSE\n                       Version 2, June 1991\n\n Copyright (C) 1989, 1991 Free Software Foundation, Inc.,\n 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The licenses for most software are designed to take away your\nfreedom to share and change it.  By contrast, the GNU General Public\nLicense is intended to guarantee your freedom to share and change free\nsoftware--to make sure the software is free for all its users.  This\nGeneral Public License applies to most of the Free Software\nFoundation's software and to any other program whose authors commit to\nusing it.  (Some other Free Software Foundation software is covered by\nthe GNU Lesser General Public License instead.)  You can apply it to\nyour programs, too.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthis service if you wish), that you receive source code or can get it\nif you want it, that you can change the software or use pieces of it\nin new free programs; and that you know you can do these things.\n\n  To protect your rights, we need to make restrictions that forbid\nanyone to deny you these rights or to ask you to surrender the rights.\nThese restrictions translate to certain responsibilities for you if you\ndistribute copies of the software, or if you modify it.\n\n  For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must give the recipients all the rights that\nyou have.  You must make sure that they, too, receive or can get the\nsource code.  
And you must show them these terms so they know their\nrights.\n\n  We protect your rights with two steps: (1) copyright the software, and\n(2) offer you this license which gives you legal permission to copy,\ndistribute and/or modify the software.\n\n  Also, for each author's protection and ours, we want to make certain\nthat everyone understands that there is no warranty for this free\nsoftware.  If the software is modified by someone else and passed on, we\nwant its recipients to know that what they have is not the original, so\nthat any problems introduced by others will not reflect on the original\nauthors' reputations.\n\n  Finally, any free program is threatened constantly by software\npatents.  We wish to avoid the danger that redistributors of a free\nprogram will individually obtain patent licenses, in effect making the\nprogram proprietary.  To prevent this, we have made it clear that any\npatent must be licensed for everyone's free use or not licensed at all.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                    GNU GENERAL PUBLIC LICENSE\n   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n\n  0. This License applies to any program or other work which contains\na notice placed by the copyright holder saying it may be distributed\nunder the terms of this General Public License.  The \"Program\", below,\nrefers to any such program or work, and a \"work based on the Program\"\nmeans either the Program or any derivative work under copyright law:\nthat is to say, a work containing the Program or a portion of it,\neither verbatim or with modifications and/or translated into another\nlanguage.  (Hereinafter, translation is included without limitation in\nthe term \"modification\".)  Each licensee is addressed as \"you\".\n\nActivities other than copying, distribution and modification are not\ncovered by this License; they are outside its scope.  
The act of\nrunning the Program is not restricted, and the output from the Program\nis covered only if its contents constitute a work based on the\nProgram (independent of having been made by running the Program).\nWhether that is true depends on what the Program does.\n\n  1. You may copy and distribute verbatim copies of the Program's\nsource code as you receive it, in any medium, provided that you\nconspicuously and appropriately publish on each copy an appropriate\ncopyright notice and disclaimer of warranty; keep intact all the\nnotices that refer to this License and to the absence of any warranty;\nand give any other recipients of the Program a copy of this License\nalong with the Program.\n\nYou may charge a fee for the physical act of transferring a copy, and\nyou may at your option offer warranty protection in exchange for a fee.\n\n  2. You may modify your copy or copies of the Program or any portion\nof it, thus forming a work based on the Program, and copy and\ndistribute such modifications or work under the terms of Section 1\nabove, provided that you also meet all of these conditions:\n\n    a) You must cause the modified files to carry prominent notices\n    stating that you changed the files and the date of any change.\n\n    b) You must cause any work that you distribute or publish, that in\n    whole or in part contains or is derived from the Program or any\n    part thereof, to be licensed as a whole at no charge to all third\n    parties under the terms of this License.\n\n    c) If the modified program normally reads commands interactively\n    when run, you must cause it, when started running for such\n    interactive use in the most ordinary way, to print or display an\n    announcement including an appropriate copyright notice and a\n    notice that there is no warranty (or else, saying that you provide\n    a warranty) and that users may redistribute the program under\n    these conditions, and telling the user how to view a copy of this\n  
  License.  (Exception: if the Program itself is interactive but\n    does not normally print such an announcement, your work based on\n    the Program is not required to print an announcement.)\n\nThese requirements apply to the modified work as a whole.  If\nidentifiable sections of that work are not derived from the Program,\nand can be reasonably considered independent and separate works in\nthemselves, then this License, and its terms, do not apply to those\nsections when you distribute them as separate works.  But when you\ndistribute the same sections as part of a whole which is a work based\non the Program, the distribution of the whole must be on the terms of\nthis License, whose permissions for other licensees extend to the\nentire whole, and thus to each and every part regardless of who wrote it.\n\nThus, it is not the intent of this section to claim rights or contest\nyour rights to work written entirely by you; rather, the intent is to\nexercise the right to control the distribution of derivative or\ncollective works based on the Program.\n\nIn addition, mere aggregation of another work not based on the Program\nwith the Program (or with a work based on the Program) on a volume of\na storage or distribution medium does not bring the other work under\nthe scope of this License.\n\n  3. 
You may copy and distribute the Program (or a work based on it,\nunder Section 2) in object code or executable form under the terms of\nSections 1 and 2 above provided that you also do one of the following:\n\n    a) Accompany it with the complete corresponding machine-readable\n    source code, which must be distributed under the terms of Sections\n    1 and 2 above on a medium customarily used for software interchange; or,\n\n    b) Accompany it with a written offer, valid for at least three\n    years, to give any third party, for a charge no more than your\n    cost of physically performing source distribution, a complete\n    machine-readable copy of the corresponding source code, to be\n    distributed under the terms of Sections 1 and 2 above on a medium\n    customarily used for software interchange; or,\n\n    c) Accompany it with the information you received as to the offer\n    to distribute corresponding source code.  (This alternative is\n    allowed only for noncommercial distribution and only if you\n    received the program in object code or executable form with such\n    an offer, in accord with Subsection b above.)\n\nThe source code for a work means the preferred form of the work for\nmaking modifications to it.  For an executable work, complete source\ncode means all the source code for all modules it contains, plus any\nassociated interface definition files, plus the scripts used to\ncontrol compilation and installation of the executable.  
However, as a\nspecial exception, the source code distributed need not include\nanything that is normally distributed (in either source or binary\nform) with the major components (compiler, kernel, and so on) of the\noperating system on which the executable runs, unless that component\nitself accompanies the executable.\n\nIf distribution of executable or object code is made by offering\naccess to copy from a designated place, then offering equivalent\naccess to copy the source code from the same place counts as\ndistribution of the source code, even though third parties are not\ncompelled to copy the source along with the object code.\n\n  4. You may not copy, modify, sublicense, or distribute the Program\nexcept as expressly provided under this License.  Any attempt\notherwise to copy, modify, sublicense or distribute the Program is\nvoid, and will automatically terminate your rights under this License.\nHowever, parties who have received copies, or rights, from you under\nthis License will not have their licenses terminated so long as such\nparties remain in full compliance.\n\n  5. You are not required to accept this License, since you have not\nsigned it.  However, nothing else grants you permission to modify or\ndistribute the Program or its derivative works.  These actions are\nprohibited by law if you do not accept this License.  Therefore, by\nmodifying or distributing the Program (or any work based on the\nProgram), you indicate your acceptance of this License to do so, and\nall its terms and conditions for copying, distributing or modifying\nthe Program or works based on it.\n\n  6. Each time you redistribute the Program (or any work based on the\nProgram), the recipient automatically receives a license from the\noriginal licensor to copy, distribute or modify the Program subject to\nthese terms and conditions.  
You may not impose any further\nrestrictions on the recipients' exercise of the rights granted herein.\nYou are not responsible for enforcing compliance by third parties to\nthis License.\n\n  7. If, as a consequence of a court judgment or allegation of patent\ninfringement or for any other reason (not limited to patent issues),\nconditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot\ndistribute so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you\nmay not distribute the Program at all.  For example, if a patent\nlicense would not permit royalty-free redistribution of the Program by\nall those who receive copies directly or indirectly through you, then\nthe only way you could satisfy both it and this License would be to\nrefrain entirely from distribution of the Program.\n\nIf any portion of this section is held invalid or unenforceable under\nany particular circumstance, the balance of the section is intended to\napply and the section as a whole is intended to apply in other\ncircumstances.\n\nIt is not the purpose of this section to induce you to infringe any\npatents or other property right claims or to contest validity of any\nsuch claims; this section has the sole purpose of protecting the\nintegrity of the free software distribution system, which is\nimplemented by public license practices.  Many people have made\ngenerous contributions to the wide range of software distributed\nthrough that system in reliance on consistent application of that\nsystem; it is up to the author/donor to decide if he or she is willing\nto distribute software through any other system and a licensee cannot\nimpose that choice.\n\nThis section is intended to make thoroughly clear what is believed to\nbe a consequence of the rest of this License.\n\n  8. 
If the distribution and/or use of the Program is restricted in\ncertain countries either by patents or by copyrighted interfaces, the\noriginal copyright holder who places the Program under this License\nmay add an explicit geographical distribution limitation excluding\nthose countries, so that distribution is permitted only in or among\ncountries not thus excluded.  In such case, this License incorporates\nthe limitation as if written in the body of this License.\n\n  9. The Free Software Foundation may publish revised and/or new versions\nof the General Public License from time to time.  Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\nEach version is given a distinguishing version number.  If the Program\nspecifies a version number of this License which applies to it and \"any\nlater version\", you have the option of following the terms and conditions\neither of that version or of any later version published by the Free\nSoftware Foundation.  If the Program does not specify a version number of\nthis License, you may choose any version ever published by the Free Software\nFoundation.\n\n  10. If you wish to incorporate parts of the Program into other free\nprograms whose distribution conditions are different, write to the author\nto ask for permission.  For software which is copyrighted by the Free\nSoftware Foundation, write to the Free Software Foundation; we sometimes\nmake exceptions for this.  Our decision will be guided by the two goals\nof preserving the free status of all derivatives of our free software and\nof promoting the sharing and reuse of software generally.\n\n                            NO WARRANTY\n\n  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY\nFOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  
EXCEPT WHEN\nOTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES\nPROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED\nOR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS\nTO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE\nPROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,\nREPAIR OR CORRECTION.\n\n  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR\nREDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,\nINCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING\nOUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED\nTO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY\nYOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER\nPROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGES.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  
It is safest\nto attach them to the start of each source file to most effectively\nconvey the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software; you can redistribute it and/or modify\n    it under the terms of the GNU General Public License as published by\n    the Free Software Foundation; either version 2 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU General Public License for more details.\n\n    You should have received a copy of the GNU General Public License along\n    with this program; if not, write to the Free Software Foundation, Inc.,\n    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nAlso add information on how to contact you by electronic and paper mail.\n\nIf the program is interactive, make it output a short notice like this\nwhen it starts in an interactive mode:\n\n    Gnomovision version 69, Copyright (C) year name of author\n    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n    This is free software, and you are welcome to redistribute it\n    under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License.  Of course, the commands you use may\nbe called something other than `show w' and `show c'; they could even be\nmouse-clicks or menu items--whatever suits your program.\n\nYou should also get your employer (if you work as a programmer) or your\nschool, if any, to sign a \"copyright disclaimer\" for the program, if\nnecessary.  
Here is a sample; alter the names:\n\n  Yoyodyne, Inc., hereby disclaims all copyright interest in the program\n  `Gnomovision' (which makes passes at compilers) written by James Hacker.\n\n  <signature of Ty Coon>, 1 April 1989\n  Ty Coon, President of Vice\n\nThis General Public License does not permit incorporating your program into\nproprietary programs.  If your program is a subroutine library, you may\nconsider it more useful to permit linking proprietary applications with the\nlibrary.  If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License.\n"
  },
  {
    "path": "pyvex_c/README",
    "content": "To generate the list of exports for Windows:\n\ngrep -E -o -h -r \"pvc\\.[a-zA-Z0-9_]+\" | cut -c 5- | sort -u\n\nThen remove Ity_I8 and add vex_lift (called from __init__ where we can't use the name pvc to reference it) and sizeofIRType (called from... the unicorn compatibility layer I think?)\n"
  },
  {
    "path": "pyvex_c/analysis.c",
    "content": "#include <libvex.h>\n#include <stddef.h>\n#include <assert.h>\n#include <stdlib.h>\n#include <string.h>\n#include <libvex_guest_arm.h>\n#include <libvex_guest_mips32.h>\n\n#include \"pyvex.h\"\n\nconst int _endian = 0xfe;\n#define BE_HOST (*((unsigned char*)&_endian) == 0)\n#define LE_HOST (*((unsigned char*)&_endian) != 0)\n\n\nvoid remove_noops(\n\tIRSB* irsb\n\t) {\n\tInt noops = 0, i;\n\tInt pos = 0;\n\n\tfor (i = 0; i < irsb->stmts_used; ++i) {\n\t\tif (irsb->stmts[i]->tag != Ist_NoOp) {\n\t\t\tif (i != pos) {\n\t\t\t\tirsb->stmts[pos] = irsb->stmts[i];\n\t\t\t}\n\t\t\tpos++;\n\t\t}\n\t\telse {\n\t\t\tnoops++;\n\t\t}\n\t}\n\n\tirsb->stmts_used -= noops;\n}\n\n\nvoid get_exits_and_inst_addrs(\n\t\tIRSB *irsb,\n\t\tVEXLiftResult *lift_r) {\n\tInt i, exit_ctr = 0, inst_count = 0;\n\tAddr ins_addr = -1;\n\tUInt size = 0;\n\tfor (i = 0; i < irsb->stmts_used; ++i) {\n\t\tIRStmt* stmt = irsb->stmts[i];\n\t\tif (stmt->tag == Ist_Exit) {\n\t\t\tassert(ins_addr != -1);\n\t\t\tif (exit_ctr < MAX_EXITS) {\n\t\t\t\tlift_r->exits[exit_ctr].ins_addr = ins_addr;\n\t\t\t\tlift_r->exits[exit_ctr].stmt_idx = i;\n\t\t\t\tlift_r->exits[exit_ctr].stmt = stmt;\n\t\t\t}\n\t\t\texit_ctr += 1;\n\t\t}\n\t\telse if (stmt->tag == Ist_IMark) {\n\t\t\tins_addr = stmt->Ist.IMark.addr + stmt->Ist.IMark.delta;\n\t\t\tsize += stmt->Ist.IMark.len;\n\t\t\tif (inst_count < sizeof(lift_r->inst_addrs) / sizeof(Addr)) {\n\t\t\t\tlift_r->inst_addrs[inst_count] = ins_addr;\n\t\t\t}\n\t\t\t// inst_count is incremented anyway. 
If lift_r->insts > 200, the overflowed\n\t\t\t// instruction addresses will not be written into inst_addrs.\n\t\t\tinst_count++;\n\t\t}\n\t}\n\n\tlift_r->exit_count = exit_ctr;\n\tlift_r->size = size;\n\tlift_r->insts = inst_count;\n}\n\nvoid get_default_exit_target(\n\t\tIRSB *irsb,\n\t\tVEXLiftResult *lift_r ) {\n\n\tIRTemp tmp;\n\tInt reg = -1;\n\tIRType reg_type = Ity_INVALID;\n\tInt i;\n\n\tlift_r->is_default_exit_constant = 0;\n\n\tif (irsb->jumpkind != Ijk_InvalICache && irsb->jumpkind != Ijk_Boring && irsb->jumpkind != Ijk_Call) {\n\t\treturn;\n\t}\n\n\tif (irsb->next->tag == Iex_Const) {\n\t\tIRConst *con = irsb->next->Iex.Const.con;\n\t\tswitch (con->tag) {\n\t\tcase Ico_U16:\n\t\t\tlift_r->is_default_exit_constant = 1;\n\t\t\tlift_r->default_exit = con->Ico.U16;\n\t\t\tbreak;\n\t\tcase Ico_U32:\n\t\t\tlift_r->is_default_exit_constant = 1;\n\t\t\tlift_r->default_exit = con->Ico.U32;\n\t\t\tbreak;\n\t\tcase Ico_U64:\n\t\t\tlift_r->is_default_exit_constant = 1;\n\t\t\tlift_r->default_exit = con->Ico.U64;\n\t\t\tbreak;\n\t\tdefault:\n\t\t\t// A weird address... we don't support it.\n\t\t\tbreak;\n\t\t}\n\t\treturn;\n\t}\n\n\tif (irsb->next->tag != Iex_RdTmp) {\n\t\t// Unexpected irsb->next type\n\t\treturn;\n\t}\n\n\t// Scan statements backwards to find the assigning statement\n\ttmp = irsb->next->Iex.RdTmp.tmp;\n\tfor (i = irsb->stmts_used - 1; i >= 0; --i) {\n\t\tIRExpr *data = NULL;\n\t\tIRStmt *stmt = irsb->stmts[i];\n\t\tif (stmt->tag == Ist_WrTmp &&\n\t\t\t\tstmt->Ist.WrTmp.tmp == tmp) {\n\t\t\tdata = stmt->Ist.WrTmp.data;\n\t\t}\n\t\telse if (stmt->tag == Ist_Put &&\n\t\t\t\tstmt->Ist.Put.offset == reg) {\n\t\t\tIRType put_type = typeOfIRExpr(irsb->tyenv, stmt->Ist.Put.data);\n\t\t\tif (put_type != reg_type) {\n\t\t\t\t// The size does not match. Give up.\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tdata = stmt->Ist.Put.data;\n\t\t}\n\t\telse if (stmt->tag == Ist_LoadG) {\n\t\t\t// We do not handle LoadG. 
Give up.\n\t\t\treturn;\n\t\t}\n\t\telse {\n\t\t\tcontinue;\n\t\t}\n\n\t\tif (data->tag == Iex_Const) {\n\t\t\tlift_r->is_default_exit_constant = 1;\n\t\t\tIRConst *con = data->Iex.Const.con;\n\t\t\tswitch (con->tag) {\n\t\t\tcase Ico_U16:\n\t\t\t\tlift_r->is_default_exit_constant = 1;\n\t\t\t\tlift_r->default_exit = con->Ico.U16;\n\t\t\t\tbreak;\n\t\t\tcase Ico_U32:\n\t\t\t\tlift_r->is_default_exit_constant = 1;\n\t\t\t\tlift_r->default_exit = con->Ico.U32;\n\t\t\t\tbreak;\n\t\t\tcase Ico_U64:\n\t\t\t\tlift_r->is_default_exit_constant = 1;\n\t\t\t\tlift_r->default_exit = con->Ico.U64;\n\t\t\t\tbreak;\n\t\t\tdefault:\n\t\t\t\t// A weird address... we don't support it.\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\treturn;\n\t\t}\n\t\telse if (data->tag == Iex_RdTmp) {\n\t\t\t// Reading another temp variable\n\t\t\ttmp = data->Iex.RdTmp.tmp;\n\t\t\treg = -1;\n\t\t}\n\t\telse if (data->tag == Iex_Get) {\n\t\t\t// Reading from a register\n\t\t\ttmp = IRTemp_INVALID;\n\t\t\treg = data->Iex.Get.offset;\n\t\t\treg_type = typeOfIRExpr(irsb->tyenv, data);\n\t\t}\n\t\telse {\n\t\t\t// Something we don't currently support\n\t\t\treturn;\n\t\t}\n\t}\n\n\t// We cannot resolve it to a constant value.\n\treturn;\n}\n\n\nAddr get_value_from_const_expr(\n\tIRConst* con) {\n\n\tswitch (con->tag) {\n\tcase Ico_U8:\n\t\treturn con->Ico.U8;\n\tcase Ico_U16:\n\t\treturn con->Ico.U16;\n\tcase Ico_U32:\n\t\treturn con->Ico.U32;\n\tcase Ico_U64:\n\t\treturn con->Ico.U64;\n\tdefault:\n\t\t// A weird address...\n\t\treturn 0;\n\t}\n}\n\n//\n// Collect data references\n//\n\n\n/* General map. 
Shamelessly stolen from ir_opt.c in libVEX */\n\ntypedef\n   struct {\n      Bool*  inuse;\n      HWord* key;\n      HWord* val;\n      Int    size;\n      Int    used;\n   }\n   HashHW;\n\nstatic HashHW* newHHW()\n{\n   HashHW* h = malloc(sizeof(HashHW));\n   h->size   = 8;\n   h->used   = 0;\n   h->inuse  = (Bool*)malloc(h->size * sizeof(Bool));\n   h->key    = (HWord*)malloc(h->size * sizeof(HWord));\n   h->val    = (HWord*)malloc(h->size * sizeof(HWord));\n   return h;\n}\n\nstatic void freeHHW(HashHW* h)\n{\n\tfree(h->inuse);\n\tfree(h->key);\n\tfree(h->val);\n\tfree(h);\n}\n\n\n/* Look up key in the map. */\n\nstatic Bool lookupHHW(HashHW* h, /*OUT*/HWord* val, HWord key)\n{\n   Int i;\n\n   for (i = 0; i < h->used; i++) {\n      if (h->inuse[i] && h->key[i] == key) {\n         if (val)\n            *val = h->val[i];\n         return True;\n      }\n   }\n   return False;\n}\n\n\n/* Add key->val to the map.  Replaces any existing binding for key. */\n\nstatic void addToHHW(HashHW* h, HWord key, HWord val)\n{\n   Int i, j;\n\n   /* Find and replace existing binding, if any. */\n   for (i = 0; i < h->used; i++) {\n      if (h->inuse[i] && h->key[i] == key) {\n         h->val[i] = val;\n         return;\n      }\n   }\n\n   /* Ensure a space is available. */\n   if (h->used == h->size) {\n      /* Copy into arrays twice the size. */\n      Bool*  inuse2 = malloc(2 * h->size * sizeof(Bool));\n      HWord* key2   = malloc(2 * h->size * sizeof(HWord));\n      HWord* val2   = malloc(2 * h->size * sizeof(HWord));\n      for (i = j = 0; i < h->size; i++) {\n         if (!h->inuse[i]) continue;\n         inuse2[j] = True;\n         key2[j] = h->key[i];\n         val2[j] = h->val[i];\n         j++;\n      }\n      h->used = j;\n      h->size *= 2;\n\t  free(h->inuse);\n      h->inuse = inuse2;\n\t  free(h->key);\n      h->key = key2;\n\t  free(h->val);\n      h->val = val2;\n   }\n\n   /* Finally, add it. 
*/\n   h->inuse[h->used] = True;\n   h->key[h->used] = key;\n   h->val[h->used] = val;\n   h->used++;\n}\n\n/* Remove key from the map. */\n\nstatic void removeFromHHW(HashHW* h, HWord key)\n{\n   Int i, j;\n\n   /* Find and replace existing binding, if any. */\n   for (i = 0; i < h->used; i++) {\n      if (h->inuse[i] && h->key[i] == key) {\n         h->inuse[i] = False;\n         return;\n      }\n   }\n}\n\n/* Create keys, of the form ((minoffset << 16) | maxoffset). */\n\nstatic UInt mk_key_GetPut ( Int offset, IRType ty )\n{\n   /* offset should fit in 16 bits. */\n   UInt minoff = offset;\n   UInt maxoff = minoff + sizeofIRType(ty) - 1;\n   return (minoff << 16) | maxoff;\n}\n\n\nvoid record_data_reference(\n\tVEXLiftResult *lift_r,\n\tAddr data_addr,\n\tInt size,\n\tDataRefTypes data_type,\n\tInt stmt_idx,\n\tAddr inst_addr) {\n\n\tif (lift_r->data_ref_count < MAX_DATA_REFS) {\n\t\tInt idx = lift_r->data_ref_count;\n\t\tlift_r->data_refs[idx].size = size;\n\t\tlift_r->data_refs[idx].data_addr = data_addr;\n\t\tlift_r->data_refs[idx].data_type = data_type;\n\t\tlift_r->data_refs[idx].stmt_idx = stmt_idx;\n\t\tlift_r->data_refs[idx].ins_addr = inst_addr;\n\t\tlift_r->data_ref_count++;\n\t}\n}\n\nAddr get_const_and_record(\n\tVEXLiftResult *lift_r,\n\tIRExpr *const_expr,\n\tInt size,\n\tDataRefTypes data_type,\n\tInt stmt_idx,\n\tAddr inst_addr,\n\tAddr next_inst_addr,\n\tBool record) {\n\n\tif (const_expr->tag != Iex_Const) {\n\t\t// Why are you calling me?\n\t\tassert (const_expr->tag == Iex_Const);\n\t\treturn -1;\n\t}\n\n\tAddr addr = get_value_from_const_expr(const_expr->Iex.Const.con);\n\tif (addr != next_inst_addr) {\n\t\tif (record) {\n\t\t\trecord_data_reference(lift_r, addr, size, data_type, stmt_idx, inst_addr);\n\t\t}\n        return addr;\n\t}\n    return -1;\n}\n\nvoid record_tmp_value(\n\tVEXLiftResult *lift_r,\n\tInt tmp,\n\tULong value,\n\tInt stmt_idx\n) {\n\tif (lift_r->const_val_count < MAX_CONST_VALS) {\n\t\tInt idx = 
lift_r->const_val_count;\n\t\tlift_r->const_vals[idx].tmp = tmp;\n\t\tlift_r->const_vals[idx].value = value;\n\t\tlift_r->const_vals[idx].stmt_idx = stmt_idx;\n\t\tlift_r->const_val_count++;\n\t}\n}\n\n\ntypedef struct {\n\tint used;\n\tULong value;\n} TmpValue;\n\n\ntypedef struct {\n\tBool in_use;\n\tULong start;\n\tULong size;\n\tunsigned char* content;\n} Region;\n\nint next_unused_region_id = 0;\n#define MAX_REGION_COUNT 1024\nRegion regions[MAX_REGION_COUNT] = {0};\n\nstatic int find_region(ULong start)\n{\n\tif (next_unused_region_id > 0 && regions[next_unused_region_id - 1].start < start) {\n\t\tif (next_unused_region_id >= MAX_REGION_COUNT) {\n\t\t\treturn -1;\n\t\t}\n\t\treturn next_unused_region_id - 1;\n\t}\n\n\tint lo = 0, hi = next_unused_region_id, mid;\n\twhile (lo != hi) {\n\t\tmid = (lo + hi) / 2;\n\t\tRegion* region = &regions[mid];\n\t\tif (region->start >= start) {\n\t\t\thi = mid;\n\t\t} else {\n\t\t\tlo = mid + 1;\n\t\t}\n\t}\n\treturn lo;\n}\n\nBool register_readonly_region(ULong start, ULong size, unsigned char* content)\n{\n\t// Where do we insert the region?\n\tif (next_unused_region_id >= MAX_REGION_COUNT) {\n\t\t// Regions are full\n\t\treturn False;\n\t}\n\n\tint pos = find_region(start);\n\tif (pos < 0) {\n\t\t// Regions are full\n\t\treturn False;\n\t}\n\n\tif (!regions[pos].in_use) {\n\t\t// it's likely to be the end - store here\n\t\tregions[pos].in_use = True;\n\t\tregions[pos].start = start;\n\t\tregions[pos].size = size;\n\t\tregions[pos].content = content;\n\t\tnext_unused_region_id++;\n\t\treturn True;\n\t}\n\n\tif (regions[pos].start == start) {\n\t\t// overwrite the current region with new data\n\t\tregions[pos].in_use = True;\n\t\tregions[pos].start = start;\n\t\tregions[pos].size = size;\n\t\tregions[pos].content = content;\n\t\treturn True;\n\t}\n\n\t// Move everything forward by one slot\n\tmemmove(&regions[pos + 1], &regions[pos], sizeof(Region) * (next_unused_region_id - pos));\n\t// Insert the new 
region\n\tregions[pos].in_use = True;\n\tregions[pos].start = start;\n\tregions[pos].size = size;\n\tregions[pos].content = content;\n\tnext_unused_region_id++;\n\treturn True;\n}\n\nvoid deregister_all_readonly_regions()\n{\n\tnext_unused_region_id = 0;\n\tregions[next_unused_region_id].in_use = 0;\n}\n\nBool load_value(ULong addr, int size, int endness, void *value) {\n\tint pos = find_region(addr);\n\tif (pos < 0 || pos >= next_unused_region_id) {\n\t\t// Does not exist\n\t\treturn False;\n\t}\n\tunsigned char* ptr = NULL;\n\tif (regions[pos].in_use &&\n\t\tregions[pos].start <= addr &&\n\t\tregions[pos].start <= addr + size &&\n\t\tregions[pos].start + regions[pos].size >= addr + size) {\n\t\tptr = regions[pos].content + (addr - regions[pos].start);\n\t} else if (pos > 0 &&\n\t\t\tregions[pos - 1].in_use &&\n\t\t\tregions[pos - 1].start <= addr &&\n\t\t\tregions[pos - 1].start <= addr + size &&\n\t\t\tregions[pos - 1].start + regions[pos - 1].size >= addr + size) {\n\t\tptr = regions[pos - 1].content + (addr - regions[pos - 1].start);\n\t} else {\n\t\treturn False;\n\t}\n\n\t// Do the load!\n\tif ((endness == Iend_LE && LE_HOST) || (endness == Iend_BE && BE_HOST)) {\n\t\tswitch (size) {\n\t\t\tcase 1:\n\t\t\t\t*(UChar*)value = *(UChar*)ptr;\n\t\t\t\tbreak;\n\t\t\tcase 2:\n\t\t\t\t*(UShort*)value = *(UShort*)ptr;\n\t\t\t\tbreak;\n\t\t\tcase 4:\n\t\t\t\t*(UInt*)value = *(UInt*)ptr;\n\t\t\t\tbreak;\n\t\t\tcase 8:\n\t\t\t\t*(ULong*)value = *(ULong*)ptr;\n\t\t\t\tbreak;\n\t\t\tdefault:\n\t\t\t\t{\n\t\t\t\t\tUChar* begin = (UChar*)value;\n\t\t\t\t\tfor (int n = 0; n < size; ++n) {\n\t\t\t\t\t\t*(begin + n) = *(ptr + n);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t}\n\t} else {\n\t\t// we need to swap data...\n\t\tUChar* begin = (UChar*)value;\n\t\tfor (int n = 0; n < size; ++n) {\n\t\t\t*(begin + size - n - 1) = *(ptr + n);\n\t\t}\n\t}\n\treturn True;\n}\n\n#undef MAX_REGION_COUNT\n\ntypedef struct _InitialReg {\n\tULong offset;\n\tUInt size;\n\tULong value;\n} 
InitialReg;\nUInt initial_reg_count = 0;\nInitialReg initial_regs[1024];\n\n\nBool register_initial_register_value(UInt offset, UInt size, ULong value)\n{\n\tif (initial_reg_count >= 1024) {\n\t\treturn False;\n\t}\n\n\tswitch (size) {\n\t\tcase 1: case 2: case 4: case 8: case 16:\n\t\t\tbreak;\n\t\tdefault:\n\t\t\treturn False;\n\t}\n\n\tUInt i = initial_reg_count;\n\tinitial_regs[i].offset = offset;\n\tinitial_regs[i].size = size;\n\tinitial_regs[i].value = value;\n\tinitial_reg_count++;\n\treturn True;\n}\n\nBool reset_initial_register_values()\n{\n\tinitial_reg_count = 0;\n\treturn True;\n}\n\n\nvoid execute_irsb(\n\tIRSB *irsb,\n\tVEXLiftResult *lift_r,\n\tVexArch guest,\n\tBool load_from_ro_regions,\n\tBool collect_data_refs,\n\tBool const_prop\n) {\n\n\tInt i;\n\tAddr inst_addr = -1, next_inst_addr = -1;\n\tHashHW* env = newHHW();\n\tTmpValue *tmps = NULL;\n\tTmpValue tmp_backingstore[1024];\n    // Record the last legitimate constant value. We do not record RdTmp or BinOp results\n    // if they are the same as the last constant.\n\tUInt last_const_value = 0;\n\n\tif (irsb->tyenv->types_used > 1024) {\n\t\ttmps = malloc(irsb->tyenv->types_used * sizeof(TmpValue));\n\t} else {\n\t\ttmps = tmp_backingstore;  // Use the local backing store to save a malloc\n\t}\n\n\tmemset(tmps, 0, irsb->tyenv->types_used * sizeof(TmpValue));\n\n\t// Set initial register values\n\tfor (i = 0; i < initial_reg_count; ++i) {\n\t\tIRType ty;\n\t\tswitch (initial_regs[i].size) {\n\t\t\tcase 1:\n\t\t\t\tty = Ity_I8;\n\t\t\t\tbreak;\n\t\t\tcase 2:\n\t\t\t\tty = Ity_I16;\n\t\t\t\tbreak;\n\t\t\tcase 4:\n\t\t\t\tty = Ity_I32;\n\t\t\t\tbreak;\n\t\t\tcase 8:\n\t\t\t\tty = Ity_I64;\n\t\t\t\tbreak;\n\t\t\tcase 16:\n\t\t\t\tty = Ity_I128;\n\t\t\t\tbreak;\n\t\t\tdefault:\n\t\t\t\tcontinue;\n\t\t}\n\t\tUInt key = mk_key_GetPut(initial_regs[i].offset, ty);\n\t\taddToHHW(env, key, initial_regs[i].value);\n\t}\n\n\tfor (i = 0; i < irsb->stmts_used; ++i) {\n\t\tIRStmt *stmt = 
irsb->stmts[i];\n\t\tswitch (stmt->tag) {\n\t\tcase Ist_IMark:\n\t\t\tinst_addr = stmt->Ist.IMark.addr + stmt->Ist.IMark.delta;\n\t\t\tnext_inst_addr = inst_addr + stmt->Ist.IMark.len;\n\t\t\tbreak;\n\t\tcase Ist_WrTmp:\n\t\t\tassert(inst_addr != -1 && next_inst_addr != -1);\n\t\t\t{\n\t\t\t\tIRExpr *data = stmt->Ist.WrTmp.data;\n\t\t\t\tswitch (data->tag) {\n\t\t\t\tcase Iex_Load:\n\t\t\t\t\t// load\n\t\t\t\t\t// e.g. t7 = LDle:I64(0x0000000000600ff8)\n\t\t\t\t\tif (data->Iex.Load.addr->tag == Iex_Const) {\n\t\t\t\t\t\tInt size;\n\t\t\t\t\t\tsize = sizeofIRType(typeOfIRTemp(irsb->tyenv, stmt->Ist.WrTmp.tmp));\n\t\t\t\t\t\tAddr v = get_const_and_record(lift_r, data->Iex.Load.addr, size, Dt_Integer, i, inst_addr, next_inst_addr, collect_data_refs);\n\t\t\t\t\t\tif (v != -1 && v != next_inst_addr) {\n\t\t\t\t\t\t\tlast_const_value = v;\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// Load the value if it might be a constant pointer...\n\t\t\t\t\t\tif (load_from_ro_regions) {\n\t\t\t\t\t\t\tUInt value = 0;\n\t\t\t\t\t\t\tif (load_value(data->Iex.Load.addr->Iex.Const.con->Ico.U32, size, data->Iex.Load.end, &value)) {\n\t\t\t\t\t\t\t\ttmps[stmt->Ist.WrTmp.tmp].used = 1;\n\t\t\t\t\t\t\t\ttmps[stmt->Ist.WrTmp.tmp].value = value;\n\t\t\t\t\t\t\t\tif (const_prop) {\n\t\t\t\t\t\t\t\t\trecord_tmp_value(lift_r, stmt->Ist.WrTmp.tmp, value, i);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if (data->Iex.Load.addr->tag == Iex_RdTmp) {\n\t\t\t\t\t\tIRTemp rdtmp = data->Iex.Load.addr->Iex.RdTmp.tmp;\n\t\t\t\t\t\tif (tmps[rdtmp].used == 1) {\n\t\t\t\t\t\t\t// The source tmp exists\n\t\t\t\t\t\t\tInt size;\n\t\t\t\t\t\t\tsize = sizeofIRType(typeOfIRTemp(irsb->tyenv, stmt->Ist.WrTmp.tmp));\n\t\t\t\t\t\t\tif (tmps[rdtmp].value != last_const_value) {\n\t\t\t\t\t\t\t\tif (collect_data_refs) {\n\t\t\t\t\t\t\t\t\trecord_data_reference(lift_r, tmps[rdtmp].value, size, Dt_Integer, i, inst_addr);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif 
(load_from_ro_regions)\n\t\t\t\t\t\t\t\tif (guest == VexArchARM && size == 4 ||\n\t\t\t\t\t\t\t\t\tguest == VexArchMIPS32 && size == 4 ||\n\t\t\t\t\t\t\t\t\tguest == VexArchMIPS64 && size == 8) {\n\t\t\t\t\t\t\t\tULong value = 0;\n\t\t\t\t\t\t\t\tif (load_value(tmps[rdtmp].value, size, data->Iex.Load.end, &value)) {\n\t\t\t\t\t\t\t\t\ttmps[stmt->Ist.WrTmp.tmp].used = 1;\n\t\t\t\t\t\t\t\t\ttmps[stmt->Ist.WrTmp.tmp].value = value;\n\t\t\t\t\t\t\t\t\tif (const_prop) {\n\t\t\t\t\t\t\t\t\t\trecord_tmp_value(lift_r, stmt->Ist.WrTmp.tmp, value, i);\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tbreak;\n\t\t\t\tcase Iex_Binop:\n\t\t\t\t\tif (data->Iex.Binop.op == Iop_Add32 || data->Iex.Binop.op == Iop_Add64) {\n\t\t\t\t\t\tIRExpr *arg1 = data->Iex.Binop.arg1, *arg2 = data->Iex.Binop.arg2;\n\t\t\t\t\t\tif (arg1->tag == Iex_Const && arg2->tag == Iex_Const) {\n\t\t\t\t\t\t\t// ip-related addressing\n\t\t\t\t\t\t\tAddr addr = get_value_from_const_expr(arg1->Iex.Const.con) +\n\t\t\t\t\t\t\t\tget_value_from_const_expr(arg2->Iex.Const.con);\n\t\t\t\t\t\t\tif (data->Iex.Binop.op == Iop_Add32) {\n\t\t\t\t\t\t\t\taddr &= 0xffffffff;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif (addr != next_inst_addr) {\n\t\t\t\t\t\t\t\tif (addr != last_const_value) {\n\t\t\t\t\t\t\t\t\tif (collect_data_refs) {\n\t\t\t\t\t\t\t\t\t\trecord_data_reference(lift_r, addr, 0, Dt_Unknown, i, inst_addr);\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif (const_prop) {\n\t\t\t\t\t\t\t\trecord_tmp_value(lift_r, stmt->Ist.WrTmp.tmp, addr, i);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t// Do the calculation\n\t\t\t\t\t\t\tif (arg1->tag == Iex_RdTmp\n\t\t\t\t\t\t\t\t&& tmps[arg1->Iex.RdTmp.tmp].used\n\t\t\t\t\t\t\t\t&& arg2->tag == Iex_Const) {\n\t\t\t\t\t\t\t\tULong arg1_value = tmps[arg1->Iex.RdTmp.tmp].value;\n\t\t\t\t\t\t\t\tULong arg2_value = get_value_from_const_expr(arg2->Iex.Const.con);\n\t\t\t\t\t\t\t\tULong value = arg1_value + 
arg2_value;\n\t\t\t\t\t\t\t\tif (data->Iex.Binop.op == Iop_Add32) {\n\t\t\t\t\t\t\t\t\tvalue &= 0xffffffff;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif (value != last_const_value) {\n\t\t\t\t\t\t\t\t\tif (collect_data_refs) {\n\t\t\t\t\t\t\t\t\t\trecord_data_reference(lift_r, value, 0, Dt_Unknown, i, inst_addr);\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\ttmps[stmt->Ist.WrTmp.tmp].used = 1;\n\t\t\t\t\t\t\t\ttmps[stmt->Ist.WrTmp.tmp].value = value;\n\t\t\t\t\t\t\t\tif (const_prop) {\n\t\t\t\t\t\t\t\t\trecord_tmp_value(lift_r, stmt->Ist.WrTmp.tmp, value, i);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif (arg1->tag == Iex_Const\n\t\t\t\t\t\t\t\t&& arg2->tag == Iex_RdTmp\n\t\t\t\t\t\t\t\t&& tmps[arg2->Iex.RdTmp.tmp].used) {\n\t\t\t\t\t\t\t\tULong arg1_value = get_value_from_const_expr(arg1->Iex.Const.con);\n\t\t\t\t\t\t\t\tULong arg2_value = tmps[arg2->Iex.RdTmp.tmp].value;\n\t\t\t\t\t\t\t\tULong value = arg1_value + arg2_value;\n\t\t\t\t\t\t\t\tif (data->Iex.Binop.op == Iop_Add32) {\n\t\t\t\t\t\t\t\t\tvalue &= 0xffffffff;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif (value != last_const_value) {\n\t\t\t\t\t\t\t\t\tif (collect_data_refs) {\n\t\t\t\t\t\t\t\t\t\trecord_data_reference(lift_r, value, 0, Dt_Unknown, i, inst_addr);\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\ttmps[stmt->Ist.WrTmp.tmp].used = 1;\n\t\t\t\t\t\t\t\ttmps[stmt->Ist.WrTmp.tmp].value = value;\n\t\t\t\t\t\t\t\tif (const_prop) {\n\t\t\t\t\t\t\t\t\trecord_tmp_value(lift_r, stmt->Ist.WrTmp.tmp, value, i);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif (arg2->tag == Iex_Const) {\n\t\t\t\t\t\t\t\tULong arg2_value = get_value_from_const_expr(arg2->Iex.Const.con);\n\t\t\t\t\t\t\t\tif (arg2_value != last_const_value) {\n\t\t\t\t\t\t\t\t\tif (collect_data_refs) {\n\t\t\t\t\t\t\t\t\t\trecord_data_reference(lift_r, arg2_value, 0, Dt_Unknown, i, inst_addr);\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif (arg1->tag == Iex_RdTmp\n\t\t\t\t\t\t\t\t&& 
tmps[arg1->Iex.RdTmp.tmp].used\n\t\t\t\t\t\t\t\t&& arg2->tag == Iex_RdTmp\n\t\t\t\t\t\t\t\t&& tmps[arg2->Iex.RdTmp.tmp].used) {\n\t\t\t\t\t\t\t\tULong arg1_value = tmps[arg1->Iex.RdTmp.tmp].value;\n\t\t\t\t\t\t\t\tULong arg2_value = tmps[arg2->Iex.RdTmp.tmp].value;\n\t\t\t\t\t\t\t\tULong value = arg1_value + arg2_value;\n\t\t\t\t\t\t\t\tif (data->Iex.Binop.op == Iop_Add32) {\n\t\t\t\t\t\t\t\t\tvalue &= 0xffffffff;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\ttmps[stmt->Ist.WrTmp.tmp].used = 1;\n\t\t\t\t\t\t\t\ttmps[stmt->Ist.WrTmp.tmp].value = value;\n\t\t\t\t\t\t\t\tif (const_prop) {\n\t\t\t\t\t\t\t\t\trecord_tmp_value(lift_r, stmt->Ist.WrTmp.tmp, value, i);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\t// Normal binary operations\n\t\t\t\t\t\tif (data->Iex.Binop.arg1->tag == Iex_Const) {\n\t\t\t\t\t\t\tAddr v = get_const_and_record(lift_r, data->Iex.Binop.arg1, 0, Dt_Unknown, i, inst_addr, next_inst_addr, collect_data_refs);\n\t\t\t\t\t\t\tif (v != -1 && v != next_inst_addr) {\n\t\t\t\t\t\t\t\tlast_const_value = v;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif (data->Iex.Binop.arg2->tag == Iex_Const) {\n\t\t\t\t\t\t\tAddr v = get_const_and_record(lift_r, data->Iex.Binop.arg2, 0, Dt_Unknown, i, inst_addr, next_inst_addr, collect_data_refs);\n\t\t\t\t\t\t\tif (v != -1 && v != next_inst_addr) {\n\t\t\t\t\t\t\t\tlast_const_value = v;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tbreak;\n\t\t\t\tcase Iex_Const:\n\t\t\t\t\t{\n\t\t\t\t\t\tAddr v = get_const_and_record(lift_r, data, 0, Dt_Unknown, i, inst_addr, next_inst_addr, collect_data_refs);\n\t\t\t\t\t\tif (v != -1 && v != next_inst_addr) {\n\t\t\t\t\t\t\tlast_const_value = v;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tAddr value = get_value_from_const_expr(data->Iex.Const.con);\n\t\t\t\t\t\ttmps[stmt->Ist.WrTmp.tmp].used = 1;\n\t\t\t\t\t\ttmps[stmt->Ist.WrTmp.tmp].value = value;\n\t\t\t\t\t\tif (const_prop) {\n\t\t\t\t\t\t\trecord_tmp_value(lift_r, stmt->Ist.WrTmp.tmp, value, 
i);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tbreak;\n\t\t\t\tcase Iex_ITE:\n\t\t\t\t\t{\n\t\t\t\t\t\tif (data->Iex.ITE.iftrue->tag == Iex_Const) {\n\t\t\t\t\t\t\tget_const_and_record(lift_r, data->Iex.ITE.iftrue, 0, Dt_Unknown, i, inst_addr, next_inst_addr, collect_data_refs);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif (data->Iex.ITE.iffalse->tag == Iex_Const) {\n\t\t\t\t\t\t\tget_const_and_record(lift_r, data->Iex.ITE.iffalse, 0, Dt_Unknown, i, inst_addr, next_inst_addr, collect_data_refs);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tbreak;\n\t\t\t\tcase Iex_Get:\n\t\t\t\t\t{\n\t\t\t\t\t\tUInt key = mk_key_GetPut(data->Iex.Get.offset, data->Iex.Get.ty);\n\t\t\t\t\t\tHWord val;\n\t\t\t\t\t\tif (lookupHHW(env, &val, key) == True) {\n\t\t\t\t\t\t\ttmps[stmt->Ist.WrTmp.tmp].used = 1;\n\t\t\t\t\t\t\ttmps[stmt->Ist.WrTmp.tmp].value = val;\n\t\t\t\t\t\t\tif (const_prop) {\n\t\t\t\t\t\t\t\trecord_tmp_value(lift_r, stmt->Ist.WrTmp.tmp, val, i);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\t// Unsupported for now\n\t\t\t\t\tbreak;\n\t\t\t\t} // end switch (data->tag)\n\t\t\t}\n\t\t\tbreak;\n\t\tcase Ist_Put:\n\t\t\t// put\n\t\t\t// e.g. 
PUT(rdi) = 0x0000000000400714\n\t\t\tassert(inst_addr != -1 && next_inst_addr != -1);\n\t\t\t{\n\t\t\t\t// Ignore itstate on ARM\n\t\t\t\tif (guest == VexArchARM && stmt->Ist.Put.offset == offsetof(VexGuestARMState, guest_ITSTATE)) {\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\t\tIRExpr *data = stmt->Ist.Put.data;\n\t\t\t\tif (data->tag == Iex_Const) {\n\t\t\t\t\tAddr v = get_const_and_record(lift_r, data, 0, Dt_Unknown, i, inst_addr, next_inst_addr, collect_data_refs);\n\t\t\t\t\tif (v != -1 && v != next_inst_addr) {\n\t\t\t\t\t\tlast_const_value = v;\n\t\t\t\t\t}\n\t\t\t\t\tUInt key = mk_key_GetPut(stmt->Ist.Put.offset, typeOfIRExpr(irsb->tyenv, data));\n\t\t\t\t\taddToHHW(env, key, get_value_from_const_expr(data->Iex.Const.con));\n\t\t\t\t} else if (data->tag == Iex_RdTmp) {\n\t\t\t\t\tif (tmps[data->Iex.RdTmp.tmp].used == 1) {\n\t\t\t\t\t\t// tmp is available\n\t\t\t\t\t\tIRType data_type = typeOfIRExpr(irsb->tyenv, data);\n\t\t\t\t\t\tUInt key = mk_key_GetPut(stmt->Ist.Put.offset, data_type);\n\t\t\t\t\t\tULong value = tmps[data->Iex.RdTmp.tmp].value;\n\t\t\t\t\t\taddToHHW(env, key, value);\n\t\t\t\t\t\tif (value != last_const_value) {\n\t\t\t\t\t\t\tif (collect_data_refs) {\n\t\t\t\t\t\t\t\trecord_data_reference(lift_r, value, 0, Dt_Integer, i, inst_addr);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\t// the tmp does not exist; we ignore updates to GP on MIPS32\n\t\t\t\t\t\t// this is to handle cases where gp is loaded from a stack variable\n\t\t\t\t\t\tif (guest == VexArchMIPS32 && stmt->Ist.Put.offset == offsetof(VexGuestMIPS32State, guest_r28)) {\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tIRType data_type = typeOfIRExpr(irsb->tyenv, data);\n\t\t\t\t\t\tUInt key = mk_key_GetPut(stmt->Ist.Put.offset, data_type);\n\t\t\t\t\t\tremoveFromHHW(env, key);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tbreak;\n\t\tcase Ist_Store:\n\t\t\t// Store\n\t\t\tassert(inst_addr != -1 && next_inst_addr != -1);\n\t\t\t{\n\t\t\t\tIRExpr *store_dst = 
stmt->Ist.Store.addr;\n\t\t\t\tIRExpr *store_data = stmt->Ist.Store.data;\n\t\t\t\tif (store_dst->tag == Iex_Const) {\n\t\t\t\t\t// Writing to a memory destination. We can get its size by analyzing the size of store_data\n\t\t\t\t\tIRType data_type = typeOfIRExpr(irsb->tyenv, stmt->Ist.Put.data);\n\t\t\t\t\tInt data_size = 0;\n\t\t\t\t\tif (data_type != Ity_INVALID) {\n\t\t\t\t\t\tdata_size = sizeofIRType(data_type);\n\t\t\t\t\t}\n\t\t\t\t\tget_const_and_record(lift_r, store_dst, data_size,\n\t\t\t\t\t\tdata_size == 0? Dt_Unknown : Dt_StoreInteger,\n\t\t\t\t\t\ti, inst_addr, next_inst_addr, collect_data_refs);\n\t\t\t\t}\n\t\t\t\tif (store_data->tag == Iex_Const) {\n\t\t\t\t\tget_const_and_record(lift_r, store_data, 0, Dt_Unknown, i, inst_addr, next_inst_addr, collect_data_refs);\n\t\t\t\t}\n\t\t\t}\n\t\t\tbreak;\n\t\tcase Ist_Dirty:\n\t\t\t// Dirty\n\t\t\tassert(inst_addr != -1 && next_inst_addr != -1);\n\t\t\tif (stmt->Ist.Dirty.details->mAddr != NULL &&\n\t\t\t\tstmt->Ist.Dirty.details->mAddr->tag == Iex_Const) {\n\t\t\t\tIRExpr *m_addr = stmt->Ist.Dirty.details->mAddr;\n\t\t\t\tget_const_and_record(lift_r, m_addr, stmt->Ist.Dirty.details->mSize, Dt_FP, i, inst_addr, next_inst_addr, collect_data_refs);\n\t\t\t}\n\t\t\tbreak;\n\t\tcase Ist_LoadG:\n\t\t\t// LoadG\n\t\t\t// e.g., t7 = if (t70) ILGop_Ident32(LDle(0x00032f50)) else t69\n\t\t\tif (stmt->Ist.LoadG.details->addr != NULL &&\n\t\t\t\tstmt->Ist.LoadG.details->addr->tag == Iex_Const) {\n\t\t\t\tIRExpr *addr = stmt->Ist.LoadG.details->addr;\n\t\t\t\tIRType data_type = typeOfIRExpr(irsb->tyenv, addr);\n\t\t\t\tInt data_size = 0;\n\t\t\t\tif (data_type != Ity_INVALID) {\n\t\t\t\t\tdata_size = sizeofIRType(data_type);\n\t\t\t\t}\n\t\t\t\tget_const_and_record(lift_r, addr, data_size, Dt_Unknown, i, inst_addr, next_inst_addr, collect_data_refs);\n\t\t\t}\n\t\t\tbreak;\n\t\tdefault:\n\t\t\tbreak;\n\t\t} // end switch (stmt->tag)\n\t}\n\n\tfreeHHW(env);\n\tif (tmps != tmp_backingstore) 
{\n\t\tfree(tmps);\n\t}\n}\n\n/* Determine if the VEX block is an no-op */\nvoid get_is_noop_block(\n\tIRSB *irsb, VEXLiftResult *lift_r\n) {\n\t// the block is a noop block if it only has IMark statements **and** it jumps to its immediate successor. VEX will\n\t// generate such blocks when opt_level==1 and cross_insn_opt is True.\n\n\t// the block is a noop block if it only has IMark statements and IP-setting statements that set the IP to the next\n\t// location. VEX will generate such blocks when opt_level==1 and cross_insn_opt is False.\n\tAddr fallthrough_addr = 0xffffffffffffffff;\n\tBool has_other_inst = False;\n\n\tfor (int i = 0; i < irsb->stmts_used; ++i) {\n\t\tIRStmt *stmt = irsb->stmts[i];\n\t\tif (stmt->tag == Ist_IMark) {\n\t\t\t// update fallthrough_addr; it will be correct upon the last instruction\n\t\t\tfallthrough_addr = stmt->Ist.IMark.addr + stmt->Ist.IMark.delta + stmt->Ist.IMark.len;\n\t\t} else if (stmt->tag == Ist_NoOp) {\n\t\t\t// NoOp is a no-op\n\t\t} else if (stmt->tag == Ist_Put) {\n\t\t\tif (stmt->Ist.Put.data->tag == Iex_Const) {\n\t\t\t\tif (irsb->offsIP != stmt->Ist.Put.offset) {\n\t\t\t\t\t// found a register write that is not the same as the pc offset; this is not a noop block\n\t\t\t\t\tlift_r->is_noop_block = False;\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// found a non-constant register write; this is not a noop block\n\t\t\t\tlift_r->is_noop_block = False;\n\t\t\t\treturn;\n\t\t\t}\n\t\t} else {\n\t\t\thas_other_inst = True;\n\t\t\tbreak;\n\t\t}\n\t}\n\tif (has_other_inst) {\n\t\tlift_r->is_noop_block = False;\n\t\treturn;\n\t}\n\n\tif (fallthrough_addr == 0xffffffffffffffff) {\n\t\t// for some reason we cannot find the fallthrough addr; just give up\n\t\tlift_r->is_noop_block = False;\n\t\treturn;\n\t}\n\n\tif (irsb->jumpkind == Ijk_Boring && irsb->next->tag == Iex_Const) {\n\t\tif (irsb->next->Iex.Const.con->tag == Ico_U32 && fallthrough_addr < 0xffffffff && fallthrough_addr == 
irsb->next->Iex.Const.con->Ico.U32\n\t\t\t|| irsb->next->Iex.Const.con->tag == Ico_U64 && fallthrough_addr == irsb->next->Iex.Const.con->Ico.U64) {\n\t\t\tlift_r->is_noop_block = True;\n\t\t\treturn;\n\t\t}\n\t}\n\n\tlift_r->is_noop_block = False;\n}\n"
  },
  {
    "path": "pyvex_c/e4c_lite.h",
    "content": "/*\n * exceptions4c lightweight version 1.0\n *\n * Copyright (c) 2014 Guillermo Calvo\n * Licensed under the GNU Lesser General Public License\n */\n\n#ifndef EXCEPTIONS4C_LITE\n#define EXCEPTIONS4C_LITE\n\n#include <stddef.h>\n#include <setjmp.h>\n\n/* Maximum number of nested `try` blocks */\n#ifndef E4C_MAX_FRAMES\n# define E4C_MAX_FRAMES 16\n#endif\n\n/* Maximum length (in bytes) of an exception message */\n#ifndef E4C_MESSAGE_SIZE\n# define E4C_MESSAGE_SIZE 128\n#endif\n\n/* Exception handling keywords: try/catch/finally/throw */\n#ifndef E4C_NOKEYWORDS\n# define try E4C_TRY\n# define catch(type) E4C_CATCH(type)\n# define finally E4C_FINALLY\n# define throw(type, message) E4C_THROW(type, message)\n#endif\n\n/* Represents an exception type */\nstruct e4c_exception_type{\n\tconst char * name;\n\tconst char * default_message;\n\tconst struct e4c_exception_type * supertype;\n};\n\n/* Declarations and definitions of exception types */\n#define E4C_DECLARE_EXCEPTION(name) extern const struct e4c_exception_type name\n#define E4C_DEFINE_EXCEPTION(name, default_message, supertype) const struct e4c_exception_type name = { #name, default_message, &supertype }\n\n/* Predefined exception types */\nE4C_DECLARE_EXCEPTION(RuntimeException);\nE4C_DECLARE_EXCEPTION(NullPointerException);\n\n/* Represents an instance of an exception type */\nstruct e4c_exception{\n\tchar message[E4C_MESSAGE_SIZE];\n\tconst char * file;\n\tint line;\n\tconst struct e4c_exception_type * type;\n};\n\n/* Retrieve current thrown exception */\n#define E4C_EXCEPTION e4c.err\n\n/* Returns whether current exception is of a given type */\n#define E4C_IS_INSTANCE_OF(t) ( e4c.err.type == &t || e4c_extends(e4c.err.type, &t) )\n\n/* Implementation details */\n#define E4C_TRY if(e4c_try(E4C_INFO) && setjmp(e4c.jump[e4c.frames - 1]) >= 0) while(e4c_hook(0)) if(e4c.frame[e4c.frames].stage == e4c_trying)\n#define E4C_CATCH(type) else if(e4c.frame[e4c.frames].stage == e4c_catching && 
E4C_IS_INSTANCE_OF(type) && e4c_hook(1))\n#define E4C_FINALLY else if(e4c.frame[e4c.frames].stage == e4c_finalizing)\n#define E4C_THROW(type, message) e4c_throw(&type, E4C_INFO, message)\n#ifndef NDEBUG\n# define E4C_INFO __FILE__, __LINE__\n#else\n# define E4C_INFO NULL, 0\n#endif\n\nenum e4c_stage{e4c_beginning, e4c_trying, e4c_catching, e4c_finalizing, e4c_done};\nextern struct e4c_context{jmp_buf jump[E4C_MAX_FRAMES]; struct e4c_exception err; struct{unsigned char stage; unsigned char uncaught;} frame[E4C_MAX_FRAMES + 1]; int frames;} e4c;\nextern int e4c_try(const char * file, int line);\nextern int e4c_hook(int is_catch);\nextern int e4c_extends(const struct e4c_exception_type * child, const struct e4c_exception_type * parent);\nextern void e4c_throw(const struct e4c_exception_type * exception_type, const char * file, int line, const char * message);\n\n# endif\n"
  },
  {
    "path": "pyvex_c/logging.c",
    "content": "// This code is GPLed by Yan Shoshitaishvili\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <stdarg.h>\n\n#include \"logging.h\"\n\nint log_level = 50;\n\nvoid pyvex_debug(const char *fmt, ...)\n{\n\tif (log_level > 10) return;\n\n\tfprintf(stderr, \"[[pyvex_c]]\\tDEBUG:\\t\");\n\tva_list args;\n\tva_start(args,fmt);\n\tvfprintf(stderr, fmt, args);\n\tva_end(args);\n\n\tfflush(stdout);\n}\n\nvoid pyvex_info(const char *fmt, ...)\n{\n\tif (log_level > 20) return;\n\n\tfprintf(stderr, \"[[pyvex_c]]\\tINFO:\\t\");\n\tva_list args;\n\tva_start(args, fmt);\n\tvfprintf(stderr, fmt, args);\n\tva_end(args);\n\n\tfflush(stdout);\n}\n\nvoid pyvex_error(const char *fmt, ...)\n{\n\tif (log_level > 40) return;\n\n\tfprintf(stderr, \"[[pyvex_c]]\\tERROR:\\t\");\n\tva_list args;\n\tva_start(args,fmt);\n\tvfprintf(stderr, fmt,args);\n\tva_end(args);\n\n\tfflush(stderr);\n}\n"
  },
  {
    "path": "pyvex_c/logging.h",
    "content": "// This code is GPLed by Yan Shoshitaishvili\n\n#ifndef __COMMON_H\n#define __COMMON_H\n\nextern int log_level;\n\nvoid pyvex_debug(const char *, ...);\nvoid pyvex_info(const char *, ...);\nvoid pyvex_error(const char *, ...);\n\n#endif\n"
  },
  {
    "path": "pyvex_c/postprocess.c",
    "content": "#include <libvex.h>\n#include <libvex_guest_arm.h>\n#include <stddef.h>\n\n#include \"pyvex_internal.h\"\n\n//\n// Jumpkind fixes for ARM\n//\n// If PC is moved to LR, then this should be an Ijk_Call\n//\n// Example:\n// MOV LR, PC\n// MOV PC, R8\n//\n// Note that the value of PC is directly used in IRStatements, i.e\n// instead of having:\n//   t0 = GET:I32(pc)\n//   PUT(lr) = t0\n// we have:\n//   PUT(lr) = 0x10400\n// The only case (that I've seen so far) where a temporary variable\n// is assigned to LR is:\n//   t2 = ITE(cond, t0, t1)\n//   PUT(lr) = t2\n//\nvoid arm_post_processor_determine_calls(\n\tAddr irsb_addr,  // Address of this IRSB\n\tInt irsb_size,  // Size of this IRSB\n\tInt irsb_insts,  // Number of instructions\n\tIRSB *irsb) {\n\n// Offset to the link register\n#define ARM_OFFB_LR      offsetof(VexGuestARMState,guest_R14)\n// The maximum number of tmps\n#define MAX_TMP \t\t 1000\n// The maximum offset of registers\n#define MAX_REG_OFFSET\t 1000\n// Dummy value\n#define DUMMY 0xffeffeff\n\n\tif (irsb->jumpkind != Ijk_Boring) {\n\t\treturn;\n\t}\n\n\t// Emulated CPU context\n\tAddr tmps[MAX_TMP + 1];\n\tAddr regs[MAX_REG_OFFSET + 1];\n\n\t// Initialize context\n\tInt i;\n\n\tfor (i = 0; i <= MAX_TMP; ++i) {\n\t\ttmps[i] = DUMMY;\n\t}\n\n\tfor (i = 0; i <= MAX_REG_OFFSET; ++i) {\n\t\tregs[i] = DUMMY;\n\t}\n\n\tInt lr_store_pc = 0;\n\tInt inst_ctr = 0;\n\tInt has_exit = 0;\n\tIRStmt *other_exit = NULL;\n\tAddr next_irsb_addr = (irsb_addr & (~1)) + irsb_size; // Clear the least significant bit\n\tInt is_thumb_mode = irsb_addr & 1;\n\n    // if we pop {..,lr,...}; b xxx, I bet this isn't a boring jump!\n    for (i = 0; i < irsb->stmts_used; ++i) {\n\t\tIRStmt *stmt = irsb->stmts[i];\n\t\tif (stmt->tag == Ist_Exit){\n\t\t    // HACK: FIXME: BLCC and friends set the default exit to Ijk_Boring\n\t\t    // Yet, the call is there, and it's just fine.\n\t\t    // We assume if the block has an exit AND lr stores PC, we're probably\n\t\t    // 
doing one of those fancy BL-ish things.\n\t\t    // Should work for BCC and friends though\n\t\t    has_exit = 1;\n\t\t    other_exit = stmt;\n\t\t}\n    }\n\n\n\tfor (i = 0; i < irsb->stmts_used; ++i) {\n\t\tIRStmt *stmt = irsb->stmts[i];\n\n\t\tif (stmt->tag == Ist_Put) {\n\t\t\t// LR is modified just before the last instruction of the block...\n\t\t\tif (stmt->Ist.Put.offset == ARM_OFFB_LR /*&& inst_ctr == irsb_insts - 1*/) {\n\t\t\t\t// ... by a constant, so test whether it is the address of the next IRSB\n\t\t\t\tif (stmt->Ist.Put.data->tag == Iex_Const) {\n\t\t\t\t\tIRConst *con = stmt->Ist.Put.data->Iex.Const.con;\n\t\t\t\t\tif (get_value_from_const_expr(con) == next_irsb_addr) {\n\t\t\t\t\t\tlr_store_pc = 1;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlr_store_pc = 0;\n\t\t\t\t\t}\n\t\t\t\t} else if (stmt->Ist.Put.data->tag == Iex_RdTmp) {\n\t\t\t\t\tInt tmp = stmt->Ist.Put.data->Iex.RdTmp.tmp;\n\t\t\t\t\tif (tmp <= MAX_TMP && next_irsb_addr == tmps[tmp]) {\n\t\t\t\t\t\tlr_store_pc = 1;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlr_store_pc = 0;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\t}\n\t\t    else {\n\t\t\t\tInt reg_offset = stmt->Ist.Put.offset;\n\t\t\t\tif (reg_offset <= MAX_REG_OFFSET) {\n\t\t\t\t\tIRExpr *data = stmt->Ist.Put.data;\n\t\t\t\t\tif (data->tag == Iex_Const) {\n\t\t\t\t\t\tregs[reg_offset] = get_value_from_const_expr(stmt->Ist.Put.data->Iex.Const.con);\n\t\t\t\t\t} else if (data->tag == Iex_RdTmp) {\n\t\t\t\t\t\tInt tmp = data->Iex.RdTmp.tmp;\n\t\t\t\t\t\tif (tmp <= MAX_TMP && tmps[tmp] != DUMMY) {\n\t\t\t\t\t\t\tregs[reg_offset] = tmps[tmp];\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if (data->tag == Iex_Get) {\n\t\t\t\t\t\tInt src_reg = data->Iex.Get.offset;\n\t\t\t\t\t\tif (src_reg <= MAX_REG_OFFSET && regs[src_reg] != DUMMY) {\n\t\t\t\t\t\t\tregs[reg_offset] = regs[src_reg];\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telse if (stmt->tag == Ist_WrTmp && stmt->Ist.WrTmp.tmp <= MAX_TMP) {\n\t\t\t// The PC value may propagate through the block, 
and since\n\t\t\t// LR is modified at the end of the block, the PC value has\n\t\t\t// to be incremented in order to match the address of the\n\t\t\t// next IRSB. So the only propagation ways that can lead to\n\t\t\t// a function call are:\n\t\t\t//\n\t\t\t//   - Iop_Add* operations (even \"sub r0, #-4\" is compiled\n\t\t\t//   as \"add r0, #4\")\n\t\t\t//   - Iop_And*, Iop_Or*, Iop_Xor*, Iop_Sh*, Iop_Not* (there\n\t\t\t//   may be some tricky and twisted ways to increment PC)\n\t\t\t//\n\t\t\tInt tmp_dst = stmt->Ist.WrTmp.tmp;\n\t\t\tif (stmt->Ist.WrTmp.data->tag == Iex_Binop) {\n\t\t\t\tIRExpr* data = stmt->Ist.WrTmp.data;\n\t\t\t\tAddr op0 = DUMMY, op1 = DUMMY;\n\t\t\t\t// Extract op0\n\t\t\t\tif (data->Iex.Binop.arg1->tag == Iex_Const) {\n\t\t\t\t\top0 = get_value_from_const_expr(data->Iex.Binop.arg1->Iex.Const.con);\n\t\t\t\t} else if (data->Iex.Binop.arg1->tag == Iex_RdTmp) {\n\t\t\t\t\tInt tmp = data->Iex.Binop.arg1->Iex.RdTmp.tmp;\n\t\t\t\t\tif (tmp <= MAX_TMP && tmps[tmp] != DUMMY) {\n\t\t\t\t\t\top0 = tmps[tmp];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Extract op1\n\t\t\t\tif (data->Iex.Binop.arg2->tag == Iex_Const) {\n\t\t\t\t\top1 = get_value_from_const_expr(data->Iex.Binop.arg2->Iex.Const.con);\n\t\t\t\t} else if (data->Iex.Binop.arg2->tag == Iex_RdTmp) {\n\t\t\t\t\tInt tmp = data->Iex.Binop.arg2->Iex.RdTmp.tmp;\n\t\t\t\t\tif (tmp <= MAX_TMP && tmps[tmp] != DUMMY) {\n\t\t\t\t\t\top1 = tmps[tmp];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif (op0 != DUMMY && op1 != DUMMY) {\n\t\t\t\t\t// Both operands are loaded. 
Perform calculation.\n\t\t\t\t\tswitch (data->Iex.Binop.op) {\n\t\t\t\t\tcase Iop_Add8: case Iop_Add16: case Iop_Add32: case Iop_Add64:\n\t\t\t\t\t\ttmps[tmp_dst] = op0 + op1;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase Iop_Sub8: case Iop_Sub16: case Iop_Sub32: case Iop_Sub64:\n\t\t\t\t\t\ttmps[tmp_dst] = op0 - op1;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase Iop_And8: case Iop_And16: case Iop_And32: case Iop_And64:\n\t\t\t\t\t\ttmps[tmp_dst] = op0 & op1;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase Iop_Or8: case Iop_Or16: case Iop_Or32: case Iop_Or64:\n\t\t\t\t\t\ttmps[tmp_dst] = op0 | op1;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase Iop_Xor8: case Iop_Xor16: case Iop_Xor32: case Iop_Xor64:\n\t\t\t\t\t\ttmps[tmp_dst] = op0 ^ op1;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase Iop_Shl8: case Iop_Shl16: case Iop_Shl32: case Iop_Shl64:\n\t\t\t\t\t\ttmps[tmp_dst] = op0 << op1;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase Iop_Shr8: case Iop_Shr16: case Iop_Shr32: case Iop_Shr64:\n\t\t\t\t\tcase Iop_Sar8: case Iop_Sar16: case Iop_Sar32: case Iop_Sar64:\n\t\t\t\t\t\ttmps[tmp_dst] = op0 >> op1;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tdefault:\n\t\t\t\t\t\t// Unsupported operation\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if (stmt->Ist.WrTmp.data->tag == Iex_Get) {\n\t\t\t\tInt reg_offset = stmt->Ist.WrTmp.data->Iex.Get.offset;\n\t\t\t\tif (reg_offset <= MAX_REG_OFFSET && regs[reg_offset] != DUMMY) {\n\t\t\t\t\ttmps[tmp_dst] = regs[reg_offset];\n\t\t\t\t}\n\t\t\t} else if (stmt->Ist.WrTmp.data->tag == Iex_ITE) {\n\t\t\t\t// Parse iftrue and iffalse\n\t\t\t\tIRExpr *data = stmt->Ist.WrTmp.data;\n\t\t\t\tif (data->Iex.ITE.iffalse->tag == Iex_Const) {\n\t\t\t\t\ttmps[tmp_dst] = get_value_from_const_expr(data->Iex.ITE.iffalse->Iex.Const.con);\n\t\t\t\t} else if (data->Iex.ITE.iffalse->tag == Iex_RdTmp) {\n\t\t\t\t\tInt tmp = data->Iex.ITE.iffalse->Iex.RdTmp.tmp;\n\t\t\t\t\tif (tmp <= MAX_TMP && tmps[tmp] != DUMMY) {\n\t\t\t\t\t\ttmps[tmp_dst] = tmps[tmp];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif (data->Iex.ITE.iftrue->tag == 
Iex_Const) {\n\t\t\t\t\ttmps[tmp_dst] = get_value_from_const_expr(data->Iex.ITE.iftrue->Iex.Const.con);\n\t\t\t\t} else if (data->Iex.ITE.iftrue->tag == Iex_RdTmp) {\n\t\t\t\t\tInt tmp = data->Iex.ITE.iftrue->Iex.RdTmp.tmp;\n\t\t\t\t\tif (tmp <= MAX_TMP && tmps[tmp] != DUMMY) {\n\t\t\t\t\t\ttmps[tmp_dst] = tmps[tmp];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if (stmt->Ist.WrTmp.data->tag == Iex_RdTmp) {\n\t\t\t\tIRExpr *data = stmt->Ist.WrTmp.data;\n\t\t\t\tInt tmp = data->Iex.RdTmp.tmp;\n\t\t\t\tif (tmp <= MAX_TMP && tmps[tmp] != DUMMY) {\n\t\t\t\t\ttmps[tmp_dst] = tmps[tmp];\n\t\t\t\t}\n\t\t\t} else if (stmt->Ist.WrTmp.data->tag == Iex_Const) {\n\t\t\t\tIRConst *con = stmt->Ist.WrTmp.data->Iex.Const.con;\n\t\t\t\ttmps[tmp_dst] = get_value_from_const_expr(con);\n\t\t\t}\n\t\t}\n\t\telse if (stmt->tag == Ist_IMark) {\n\t\t\tinst_ctr++;\n\t\t}\n\t}\n\n\tif (lr_store_pc) {\n\t\tif (has_exit &&  // It has a non-default exit\n\t\t\tother_exit->Ist.Exit.jk == Ijk_Boring &&  // The non-default exit is a Boring jump\n\t\t\tget_value_from_const_expr(other_exit->Ist.Exit.dst) != next_irsb_addr + is_thumb_mode // The non-default exit is not skipping\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t  // the last instruction\n\t\t) {\n\t\t\t// Fix the non-default exit\n\t\t\tother_exit->Ist.Exit.jk = Ijk_Call;\n\t\t}\n\t\telse if (!has_exit || other_exit->Ist.Exit.jk != Ijk_Call) {\n\t\t\t//Fix the default exit\n\t\t\tirsb->jumpkind = Ijk_Call;\n\t\t}\n\t}\n\n// Undefine all defined values\n#undef ARM_OFFB_LR\n#undef MAX_TMP\n#undef MAX_REG_OFFSET\n#undef DUMMY\n}\n\n\n//\n// Unconditional branch fixes for MIPS32\n//\n// Handle unconditional branches\n// `beq $zero, $zero, xxxx`\n// It is translated to\n//\n// 15 | ------ IMark(0x401684, 4, 0) ------\n// 16 | t0 = CmpEQ32(0x00000000, 0x00000000)\n// 17 | PUT(128) = 0x00401688\n// 18 | ------ IMark(0x401688, 4, 0) ------\n// 19 | if (t0) goto {Ijk_Boring} 0x401684\n// 20 | PUT(128) = 0x0040168c\n// 21 | t4 = GET:I32(128)\n// NEXT: PUT(128) = 
t4; Ijk_Boring\n//\nvoid mips32_post_processor_fix_unconditional_exit(\n\tIRSB *irsb) {\n\n#define INVALID\t\t0xffff\n\n\tInt i;\n\tInt tmp_exit = INVALID, exit_stmt_idx = INVALID;\n\tIRConst *dst = NULL;\n\n\tfor (i = irsb->stmts_used - 1; i >= 0; --i) {\n\t\tIRStmt *stmt = irsb->stmts[i];\n\t\tif (tmp_exit == INVALID) {\n\t\t\t// Looking for the Exit statement\n\t\t\tif (stmt->tag == Ist_Exit &&\n\t\t\t\t\tstmt->Ist.Exit.jk == Ijk_Boring &&\n\t\t\t\t\tstmt->Ist.Exit.guard->tag == Iex_RdTmp) {\n\t\t\t\ttmp_exit = stmt->Ist.Exit.guard->Iex.RdTmp.tmp;\n\t\t\t\tdst = stmt->Ist.Exit.dst;\n\t\t\t\texit_stmt_idx = i;\n\t\t\t}\n\t\t}\n\t\telse if (stmt->tag == Ist_WrTmp && stmt->Ist.WrTmp.tmp == tmp_exit) {\n\t\t\t// Looking for the WrTmp statement\n\t\t\tIRExpr *data = stmt->Ist.WrTmp.data;\n\t\t\tif (data->tag == Iex_Binop &&\n\t\t\t\tdata->Iex.Binop.op == Iop_CmpEQ32 &&\n\t\t\t\tdata->Iex.Binop.arg1->tag == Iex_Const &&\n\t\t\t\tdata->Iex.Binop.arg2->tag == Iex_Const &&\n\t\t\t\tget_value_from_const_expr(data->Iex.Binop.arg1->Iex.Const.con) ==\n\t\t\t\t\tget_value_from_const_expr(data->Iex.Binop.arg2->Iex.Const.con)) {\n\t\t\t\t\t\t// We found it\n\n\t\t\t\t\t\t// Update the statements\n\t\t\t\t\t\tInt j;\n\t\t\t\t\t\tfor (j = exit_stmt_idx; j < irsb->stmts_used - 1; ++j) {\n\t\t\t\t\t\t\tirsb->stmts[j] = irsb->stmts[j + 1];\n\t\t\t\t\t\t}\n\t\t\t\t\t\tirsb->stmts_used -= 1;\n\t\t\t\t\t\t// Update the default of the IRSB\n\t\t\t\t\t\tirsb->next = IRExpr_Const(dst);\n\t\t\t}\n\t\t\tbreak;\n\t\t}\n\t}\n\n#undef INVALID\n}\n\nvoid irsb_insert(IRSB *irsb, IRStmt* stmt, Int i) {\n    addStmtToIRSB(irsb, stmt);\n\n\tIRStmt *in_air = irsb->stmts[irsb->stmts_used - 1];\n\tfor (Int j = irsb->stmts_used - 1; j > i; j--) {\n        irsb->stmts[j] = irsb->stmts[j-1];\n\t}\n\tirsb->stmts[i] = in_air;\n}\n\nvoid zero_division_side_exits(IRSB *irsb) {\n\tInt i;\n\tAddr lastIp = -1;\n\tIRType addrTy = typeOfIRExpr(irsb->tyenv, irsb->next);\n\tIRConstTag addrConst = addrTy == Ity_I32 
? Ico_U32 : addrTy == Ity_I16 ? Ico_U16 : Ico_U64;\n\tIRType argty;\n\tIRTemp cmptmp;\n\n\tfor (i = 0; i < irsb->stmts_used; i++) {\n\t\tIRStmt *stmt = irsb->stmts[i];\n\t\tswitch (stmt->tag) {\n\t\t\tcase Ist_IMark:\n\t\t\t\tlastIp = stmt->Ist.IMark.addr;\n\t\t\t\tcontinue;\n\t\t\tcase Ist_WrTmp:\n\t\t\t\tif (stmt->Ist.WrTmp.data->tag != Iex_Binop) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tswitch (stmt->Ist.WrTmp.data->Iex.Binop.op) {\n\t\t\t\t\tcase Iop_DivU32:\n\t\t\t\t\tcase Iop_DivS32:\n\t\t\t\t\tcase Iop_DivU32E:\n\t\t\t\t\tcase Iop_DivS32E:\n\t\t\t\t\tcase Iop_DivModU64to32:\n\t\t\t\t\tcase Iop_DivModS64to32:\n\t\t\t\t\t\targty = Ity_I32;\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase Iop_DivU64:\n\t\t\t\t\tcase Iop_DivS64:\n\t\t\t\t\tcase Iop_DivU64E:\n\t\t\t\t\tcase Iop_DivS64E:\n\t\t\t\t\tcase Iop_DivModU128to64:\n\t\t\t\t\tcase Iop_DivModS128to64:\n\t\t\t\t\tcase Iop_DivModS64to64:\n\t\t\t\t\t\targty = Ity_I64;\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\t// TODO YIKES\n\t\t\t\t\t//case Iop_DivF32:\n\t\t\t\t\t//\targty = Ity_F32;\n\n\t\t\t\t\t//case Iop_DivF64:\n\t\t\t\t\t//case Iop_DivF64r32:\n\t\t\t\t\t//\targty = Ity_F64;\n\n\t\t\t\t\t//case Iop_DivF128:\n\t\t\t\t\t//\targty = Ity_F128;\n\n\t\t\t\t\t//case Iop_DivD64:\n\t\t\t\t\t//\targty = Ity_D64;\n\n\t\t\t\t\t//case Iop_DivD128:\n\t\t\t\t\t//\targty = Ity_D128;\n\n\t\t\t\t\t//case Iop_Div32Fx4:\n\t\t\t\t\t//case Iop_Div32F0x4:\n\t\t\t\t\t//case Iop_Div64Fx2:\n\t\t\t\t\t//case Iop_Div64F0x2:\n\t\t\t\t\t//case Iop_Div64Fx4:\n\t\t\t\t\t//case Iop_Div32Fx8:\n\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tcmptmp = newIRTemp(irsb->tyenv, Ity_I1);\n\t\t\t\tirsb_insert(irsb, IRStmt_WrTmp(cmptmp, IRExpr_Binop(argty == Ity_I32 ? Iop_CmpEQ32 : Iop_CmpEQ64, stmt->Ist.WrTmp.data->Iex.Binop.arg2, IRExpr_Const(argty == Ity_I32 ? 
IRConst_U32(0) : IRConst_U64(0)))), i);\n\t\t\t\ti++;\n\t\t\t\tIRConst *failAddr = IRConst_U64(lastIp); // ohhhhh boy this is a hack\n\t\t\t\tfailAddr->tag = addrConst;\n\t\t\t\tirsb_insert(irsb, IRStmt_Exit(IRExpr_RdTmp(cmptmp), Ijk_SigFPE_IntDiv, failAddr, irsb->offsIP), i);\n\t\t\t\ti++;\n\t\t\t\tbreak;\n\n\t\tdefault:\n\t\t\tcontinue;\n\t\t}\n\t}\n}\n\n"
  },
  {
    "path": "pyvex_c/pyvex.c",
    "content": "/*\nThis is shamelessly ripped from Vine, because those guys have very very strange language preferences.\nVine is Copyright (C) 2006-2009, BitBlaze Team.\n\nYou can redistribute and modify it under the terms of the GNU GPL,\nversion 2 or later, but it is made available WITHOUT ANY WARRANTY.\nSee the top-level README file for more details.\n\nFor more information about Vine and other BitBlaze software, see our\nweb site at: http://bitblaze.cs.berkeley.edu/\n*/\n\n//======================================================================\n//\n// This file provides the interface to VEX that allows block by block\n// translation from binary to VEX IR.\n//\n//======================================================================\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <setjmp.h>\n#include <stddef.h>\n#include <libvex.h>\n\n#include \"pyvex.h\"\n#include \"pyvex_internal.h\"\n#include \"logging.h\"\n\n//======================================================================\n//\n// Globals\n//\n//======================================================================\n\n// Some info required for translation\nVexArchInfo         vai_host;\nVexGuestExtents     vge;\nVexTranslateArgs    vta;\nVexTranslateResult  vtr;\nVexAbiInfo\t        vbi;\nVexControl          vc;\n\n// Log message buffer, from vex itself\nchar *msg_buffer = NULL;\nsize_t msg_capacity = 0, msg_current_size = 0;\n\njmp_buf jumpout;\n\n//======================================================================\n//\n// Functions needed for the VEX translation\n//\n//======================================================================\n\n#ifdef _MSC_VER\n__declspec(noreturn)\n#else\n__attribute__((noreturn))\n#endif\nstatic void failure_exit(void) {\n\tlongjmp(jumpout, 1);\n}\n\nstatic void log_bytes(const HChar* bytes, SizeT nbytes) {\n\tif (msg_buffer == NULL) {\n\t\tmsg_buffer = malloc(nbytes);\n\t\tmsg_capacity = nbytes;\n\t}\n\tif (nbytes + msg_current_size 
> msg_capacity) {\n\t\tdo {\n\t\t\tmsg_capacity *= 2;\n\t\t} while (nbytes + msg_current_size > msg_capacity);\n\t\tmsg_buffer = realloc(msg_buffer, msg_capacity);\n\t}\n\n\tmemcpy(&msg_buffer[msg_current_size], bytes, nbytes);\n\tmsg_current_size += nbytes;\n}\n\nvoid clear_log() {\n\tif (msg_buffer != NULL) {\n\t\t\tfree(msg_buffer);\n\t\t\tmsg_buffer = NULL;\n\t\t\tmsg_capacity = 0;\n\t\t\tmsg_current_size = 0;\n\t}\n}\n\nstatic Bool chase_into_ok(void *closureV, Addr addr64) {\n\treturn False;\n}\n\nstatic UInt needs_self_check(void *callback_opaque, VexRegisterUpdates* pxControl, const VexGuestExtents *guest_extents) {\n\treturn 0;\n}\n\nstatic void *dispatch(void) {\n\treturn NULL;\n}\n\n\n//----------------------------------------------------------------------\n// Initializes VEX\n// It must be called before using VEX for translation to Valgrind IR\n//----------------------------------------------------------------------\nint vex_init() {\n\tstatic int initialized = 0;\n\tpyvex_debug(\"Initializing VEX.\\n\");\n\n\tif (initialized) {\n\t\tpyvex_debug(\"VEX already initialized.\\n\");\n\t\treturn 1;\n\t}\n\tinitialized = 1;\n\n\t// Initialize VEX\n\tLibVEX_default_VexControl(&vc);\n\tLibVEX_default_VexArchInfo(&vai_host);\n\tLibVEX_default_VexAbiInfo(&vbi);\n\n\tvc.iropt_verbosity              = 0;\n\tvc.iropt_level                  = 0;    // No optimization by default\n\t//vc.iropt_precise_memory_exns    = False;\n\tvc.iropt_unroll_thresh          = 0;\n\tvc.guest_max_insns              = 1;    // By default, we vex 1 instruction at a time\n\tvc.guest_chase_thresh           = 0;\n\tvc.arm64_allow_reordered_writeback = 0;\n\tvc.x86_optimize_callpop_idiom = 0;\n\tvc.strict_block_end = 0;\n\tvc.special_instruction_support = 0;\n\n\tpyvex_debug(\"Calling LibVEX_Init()....\\n\");\n\tif (setjmp(jumpout) == 0) {\n        // the 0 is the debug level\n        LibVEX_Init(&failure_exit, &log_bytes, 0, &vc);\n        pyvex_debug(\"LibVEX_Init() done....\\n\");\n    } 
else {\n        pyvex_debug(\"LibVEX_Init() failed catastrophically...\\n\");\n        return 0;\n    }\n\n#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__\n\tvai_host.endness = VexEndnessLE;\n#else\n\tvai_host.endness = VexEndnessBE;\n#endif\n\n\t// various settings to make stuff work\n\t// ... former is set to 'unspecified', but gets set in vex_inst for archs which care\n\t// ... the latter two are for dealing with gs and fs in VEX\n\tvbi.guest_stack_redzone_size = 0;\n\tvbi.guest_amd64_assume_fs_is_const = True;\n\tvbi.guest_amd64_assume_gs_is_const = True;\n\n\t//------------------------------------\n\t// options for instruction translation\n\n\t//\n\t// Architecture info\n\t//\n\tvta.arch_guest          = VexArch_INVALID; // to be assigned later\n#if __amd64__ || _WIN64\n\tvta.arch_host = VexArchAMD64;\n#elif __i386__ || _WIN32\n\tvta.arch_host = VexArchX86;\n#elif __arm__\n\tvta.arch_host = VexArchARM;\n\tvai_host.hwcaps = 7;\n#elif __aarch64__\n\tvta.arch_host = VexArchARM64;\n#elif __s390x__\n\tvta.arch_host = VexArchS390X;\n\tvai_host.hwcaps = VEX_HWCAPS_S390X_LDISP;\n#elif defined(__powerpc__) && defined(__NetBSD__)\n#  if defined(__LONG_WIDTH__) && (__LONG_WIDTH__ == 32)\n\tvta.arch_host = VexArchPPC32;\n#  endif\n#elif defined(__powerpc__)\n        vta.arch_host = VexArchPPC64;\n#elif defined(__riscv)\n#  if defined(__riscv_xlen) && (__riscv_xlen == 64)\n\tvta.arch_host = VexArchRISCV64;\n#  endif\n#else\n#error \"Unsupported host arch\"\n#endif\n\n\tvta.archinfo_host = vai_host;\n\n\t//\n\t// The actual stuff to vex\n\t//\n\tvta.guest_bytes         = NULL;             // Set in vex_insts\n\tvta.guest_bytes_addr    = 0;                // Set in vex_insts\n\n\t//\n\t// callbacks\n\t//\n\tvta.callback_opaque     = NULL;             // Used by chase_into_ok, but never actually called\n\tvta.chase_into_ok       = chase_into_ok;    // Always returns false\n\tvta.preamble_function   = NULL;\n\tvta.instrument1         = NULL;\n\tvta.instrument2         = 
NULL;\n\tvta.finaltidy\t    \t= NULL;\n\tvta.needs_self_check\t= needs_self_check;\n\n\tvta.disp_cp_chain_me_to_slowEP = (void *)dispatch; // Not used\n\tvta.disp_cp_chain_me_to_fastEP = (void *)dispatch; // Not used\n\tvta.disp_cp_xindir = (void *)dispatch; // Not used\n\tvta.disp_cp_xassisted = (void *)dispatch; // Not used\n\n\tvta.guest_extents       = &vge;\n\tvta.host_bytes          = NULL;           // Buffer for storing the output binary\n\tvta.host_bytes_size     = 0;\n\tvta.host_bytes_used     = NULL;\n\t// doesn't exist? vta.do_self_check       = False;\n\tvta.traceflags          = 0;                // Debug verbosity\n\t//vta.traceflags          = -1;                // Debug verbosity\n    return 1;\n}\n\n// Prepare the VexArchInfo struct\nstatic void vex_prepare_vai(VexArch arch, VexArchInfo *vai) {\n\tswitch (arch) {\n\t\tcase VexArchX86:\n\t\t\tvai->hwcaps =   VEX_HWCAPS_X86_MMXEXT |\n\t\t\t\t\t\t\tVEX_HWCAPS_X86_SSE1 |\n\t\t\t\t\t\t\tVEX_HWCAPS_X86_SSE2 |\n\t\t\t\t\t\t\tVEX_HWCAPS_X86_SSE3 |\n\t\t\t\t\t\t\tVEX_HWCAPS_X86_LZCNT;\n\t\t\tbreak;\n\t\tcase VexArchAMD64:\n\t\t\tvai->hwcaps =   VEX_HWCAPS_AMD64_SSE3 |\n\t\t\t\t\t\t\tVEX_HWCAPS_AMD64_CX16 |\n\t\t\t\t\t\t\tVEX_HWCAPS_AMD64_LZCNT |\n\t\t\t\t\t\t\tVEX_HWCAPS_AMD64_AVX |\n\t\t\t\t\t\t\tVEX_HWCAPS_AMD64_RDTSCP |\n\t\t\t\t\t\t\tVEX_HWCAPS_AMD64_BMI |\n\t\t\t\t\t\t\tVEX_HWCAPS_AMD64_AVX2;\n\t\t\tbreak;\n\t\tcase VexArchARM:\n\t\t\tvai->hwcaps = VEX_ARM_ARCHLEVEL(8) |\n\t\t\t\t\t\t\tVEX_HWCAPS_ARM_NEON |\n\t\t\t\t\t\t\tVEX_HWCAPS_ARM_VFP3;\n\t\t\tbreak;\n\t\tcase VexArchARM64:\n\t\t\tvai->hwcaps = 0;\n\t\t\tvai->arm64_dMinLine_lg2_szB = 6;\n\t\t\tvai->arm64_iMinLine_lg2_szB = 6;\n\t\t\tbreak;\n\t\tcase VexArchPPC32:\n\t\t\tvai->hwcaps =   VEX_HWCAPS_PPC32_F |\n\t\t\t\t\t\t\tVEX_HWCAPS_PPC32_V |\n\t\t\t\t\t\t\tVEX_HWCAPS_PPC32_FX |\n\t\t\t\t\t\t\tVEX_HWCAPS_PPC32_GX |\n\t\t\t\t\t\t\tVEX_HWCAPS_PPC32_VX |\n\t\t\t\t\t\t\tVEX_HWCAPS_PPC32_DFP 
|\n\t\t\t\t\t\t\tVEX_HWCAPS_PPC32_ISA2_07;\n\t\t\tvai->ppc_icache_line_szB = 32; // unsure if correct\n\t\t\tbreak;\n\t\tcase VexArchPPC64:\n\t\t\tvai->hwcaps =   VEX_HWCAPS_PPC64_V |\n\t\t\t\t\t\t\tVEX_HWCAPS_PPC64_FX |\n\t\t\t\t\t\t\tVEX_HWCAPS_PPC64_GX |\n\t\t\t\t\t\t\tVEX_HWCAPS_PPC64_VX |\n\t\t\t\t\t\t\tVEX_HWCAPS_PPC64_DFP |\n\t\t\t\t\t\t\tVEX_HWCAPS_PPC64_ISA2_07;\n\t\t\tvai->ppc_icache_line_szB = 64; // unsure if correct\n\t\t\tbreak;\n\t\tcase VexArchS390X:\n\t\t\tvai->hwcaps = 0;\n\t\t\tbreak;\n\t\tcase VexArchMIPS32:\n\t\tcase VexArchMIPS64:\n\t\t\tvai->hwcaps = VEX_PRID_COMP_CAVIUM;\n\t\t\tbreak;\n\t\tcase VexArchRISCV64:\n\t\t\tvai->hwcaps = 0;\n\t\t\tbreak;\n\t\tdefault:\n\t\t\tpyvex_error(\"Invalid arch in vex_prepare_vai.\\n\");\n\t\t\tbreak;\n\t}\n}\n\n// Prepare the VexAbiInfo\nstatic void vex_prepare_vbi(VexArch arch, VexAbiInfo *vbi) {\n\t// only setting the guest_stack_redzone_size for now\n\t// this attribute is only specified by the X86, AMD64 and PPC64 ABIs\n\n\tswitch (arch) {\n\t\tcase VexArchX86:\n\t\t\tvbi->guest_stack_redzone_size = 0;\n\t\t\tbreak;\n\t\tcase VexArchAMD64:\n\t\t\tvbi->guest_stack_redzone_size = 128;\n\t\t\tbreak;\n\t\tcase VexArchPPC64:\n\t\t\tvbi->guest_stack_redzone_size = 288;\n\t\t\tbreak;\n\t\tdefault:\n\t\t\tbreak;\n\t}\n}\n\nVEXLiftResult _lift_r;\n\n//----------------------------------------------------------------------\n// Main entry point. 
Do a lift.\n//----------------------------------------------------------------------\nVEXLiftResult *vex_lift(\n\t\tVexArch guest,\n\t\tVexArchInfo archinfo,\n\t\tunsigned char *insn_start,\n\t\tunsigned long long insn_addr,\n\t\tunsigned int max_insns,\n\t\tunsigned int max_bytes,\n\t\tint opt_level,\n\t\tint traceflags,\n\t\tint allow_arch_optimizations,\n\t\tint strict_block_end,\n\t\tint collect_data_refs,\n\t\tint load_from_ro_regions,\n\t\tint const_prop,\n\t\tVexRegisterUpdates px_control,\n\t\tunsigned int lookback) {\n\tVexRegisterUpdates pxControl = px_control;\n\n\tvex_prepare_vai(guest, &archinfo);\n\tvex_prepare_vbi(guest, &vbi);\n\n\tpyvex_debug(\"Guest arch: %d\\n\", guest);\n\tpyvex_debug(\"Guest arch hwcaps: %08x\\n\", archinfo.hwcaps);\n\n\tvta.archinfo_guest = archinfo;\n\tvta.arch_guest = guest;\n\tvta.abiinfo_both = vbi; // Set the vbi value\n\n\tvta.guest_bytes         = (UChar *)(insn_start);  // Ptr to actual bytes of start of instruction\n\tvta.guest_bytes_addr    = (Addr64)(insn_addr);\n\tvta.traceflags          = traceflags;\n\n\tvc.guest_max_bytes     = max_bytes;\n\tvc.guest_max_insns     = max_insns;\n\tvc.iropt_level         = opt_level;\n\tvc.lookback_amount     = lookback;\n\n\t// Gate all of these on one flag, they depend on the arch\n\tvc.arm_allow_optimizing_lookback = allow_arch_optimizations;\n\tvc.arm64_allow_reordered_writeback = allow_arch_optimizations;\n\tvc.x86_optimize_callpop_idiom = allow_arch_optimizations;\n\n\tvc.strict_block_end = strict_block_end;\n\n\tclear_log();\n\n\t// Do the actual translation\n\tif (setjmp(jumpout) == 0) {\n\t\tLibVEX_Update_Control(&vc);\n\t\t_lift_r.is_noop_block = False;\n\t\t_lift_r.data_ref_count = 0;\n\t\t_lift_r.const_val_count = 0;\n\t\t_lift_r.irsb = LibVEX_Lift(&vta, &vtr, &pxControl);\n\t\tif (!_lift_r.irsb) {\n\t\t\t// Lifting failed\n\t\t\treturn NULL;\n\t\t}\n\t\tremove_noops(_lift_r.irsb);\n\t\tif (guest == VexArchMIPS32) {\n\t\t\t// This post processor may potentially remove 
statements.\n\t\t\t// Call it before we get exit statements and such.\n\t\t\tmips32_post_processor_fix_unconditional_exit(_lift_r.irsb);\n\t\t}\n\t\tget_exits_and_inst_addrs(_lift_r.irsb, &_lift_r);\n\t\tget_default_exit_target(_lift_r.irsb, &_lift_r);\n\t\tif (guest == VexArchARM && _lift_r.insts > 0) {\n\t\t\tarm_post_processor_determine_calls(_lift_r.inst_addrs[0], _lift_r.size, _lift_r.insts, _lift_r.irsb);\n\t\t}\n\t\tzero_division_side_exits(_lift_r.irsb);\n\t\tget_is_noop_block(_lift_r.irsb, &_lift_r);\n\t\tif (collect_data_refs || const_prop) {\n\t\t\texecute_irsb(_lift_r.irsb, &_lift_r, guest, (Bool)load_from_ro_regions, (Bool)collect_data_refs, (Bool)const_prop);\n\t\t}\n\t\treturn &_lift_r;\n\t} else {\n\t\treturn NULL;\n\t}\n}\n"
  },
  {
    "path": "pyvex_c/pyvex.def",
    "content": "LIBRARY pyvex.dll\r\n\r\nEXPORTS\r\n  IRConst_F32\r\n  IRConst_F32i\r\n  IRConst_F64\r\n  IRConst_F64i\r\n  IRConst_U1\r\n  IRConst_U16\r\n  IRConst_U32\r\n  IRConst_U64\r\n  IRConst_U8\r\n  IRConst_V128\r\n  IRConst_V256\r\n  IRExpr_Binder\r\n  IRExpr_Binop\r\n  IRExpr_CCall\r\n  IRExpr_Const\r\n  IRExpr_GSPTR\r\n  IRExpr_Get\r\n  IRExpr_GetI\r\n  IRExpr_ITE\r\n  IRExpr_Load\r\n  IRExpr_Qop\r\n  IRExpr_RdTmp\r\n  IRExpr_Triop\r\n  IRExpr_Unop\r\n  IRExpr_VECRET\r\n  emptyIRSB\r\n  emptyIRTypeEnv\r\n  log_level\r\n  mkIRCallee\r\n  mkIRExprVec_0\r\n  mkIRExprVec_1\r\n  mkIRExprVec_2\r\n  mkIRExprVec_3\r\n  mkIRExprVec_4\r\n  mkIRExprVec_5\r\n  mkIRExprVec_6\r\n  mkIRExprVec_7\r\n  mkIRExprVec_8\r\n  mkIRRegArray\r\n  msg_buffer\r\n  msg_current_size\r\n  newIRTemp\r\n  typeOfIRExpr\r\n  typeOfIRLoadGOp\r\n  typeOfPrimop\r\n  clear_log\r\n  vex_lift\r\n  vex_init\r\n  register_readonly_region\r\n  deregister_all_readonly_regions\r\n  register_initial_register_value\r\n  reset_initial_register_values\r\n  sizeofIRType\r\n"
  },
  {
    "path": "pyvex_c/pyvex.h",
    "content": "// This code is GPLed by Yan Shoshitaishvili\n\n#ifndef __VEXIR_H\n#define __VEXIR_H\n\n#include <libvex.h>\n\n// Some info required for translation\nextern int log_level;\nextern VexTranslateArgs    vta;\n\nextern char *msg_buffer;\nextern size_t msg_current_size;\nvoid clear_log(void);\n\n//\n// Initializes VEX. This function must be called before vex_lift\n// can be used.\n//\nint vex_init(void);\n\ntypedef struct _ExitInfo {\n\tInt stmt_idx;\n\tAddr ins_addr;\n\tIRStmt *stmt;\n} ExitInfo;\n\ntypedef enum {\n\tDt_Unknown = 0x9000,\n\tDt_Integer,\n\tDt_FP,\n\tDt_StoreInteger\n} DataRefTypes;\n\ntypedef struct _DataRef {\n\tAddr data_addr;\n\tInt size;\n\tDataRefTypes data_type;\n\tInt stmt_idx;\n\tAddr ins_addr;\n} DataRef;\n\ntypedef struct _ConstVal {\n\tInt tmp;\n\tInt stmt_idx;\n\tULong value;  // 64-bit max\n} ConstVal;\n\n#define MAX_EXITS 400\n#define MAX_DATA_REFS 2000\n#define MAX_CONST_VALS 1000\n\ntypedef struct _VEXLiftResult {\n\tIRSB* irsb;\n\tInt size;\n\tBool is_noop_block;\n\t// Conditional exits\n\tInt exit_count;\n\tExitInfo exits[MAX_EXITS];\n\t// The default exit\n\tInt is_default_exit_constant;\n\tAddr default_exit;\n\t// Instruction addresses\n\tInt insts;\n\tAddr inst_addrs[200];\n\t// Data references\n\tInt data_ref_count;\n\tDataRef data_refs[MAX_DATA_REFS];\n\t// Constant propagation\n\tInt const_val_count;\n\tConstVal const_vals[MAX_CONST_VALS];\n} VEXLiftResult;\n\nVEXLiftResult *vex_lift(\n\t\tVexArch guest,\n\t\tVexArchInfo archinfo,\n\t\tunsigned char *insn_start,\n\t\tunsigned long long insn_addr,\n\t\tunsigned int max_insns,\n\t\tunsigned int max_bytes,\n\t\tint opt_level,\n\t\tint traceflags,\n\t\tint allow_arch_optimizations,\n\t\tint strict_block_end,\n\t\tint collect_data_refs,\n\t\tint load_from_ro_regions,\n\t\tint const_prop,\n\t\tVexRegisterUpdates px_control,\n\t\tunsigned int lookback_amount);\n\nBool register_readonly_region(ULong start, ULong size, unsigned char* content);\nvoid 
deregister_all_readonly_regions();\nBool register_initial_register_value(UInt offset, UInt size, ULong value);\nBool reset_initial_register_values();\n\n#endif\n"
  },
  {
    "path": "pyvex_c/pyvex_internal.h",
    "content": "#include \"pyvex.h\"\n\nvoid arm_post_processor_determine_calls(Addr irsb_addr, Int irsb_size, Int irsb_insts, IRSB *irsb);\nvoid mips32_post_processor_fix_unconditional_exit(IRSB *irsb);\n\nvoid remove_noops(IRSB* irsb);\nvoid zero_division_side_exits(IRSB* irsb);\nvoid get_exits_and_inst_addrs(IRSB *irsb, VEXLiftResult *lift_r);\nvoid get_default_exit_target(IRSB *irsb, VEXLiftResult *lift_r);\nvoid get_is_noop_block(IRSB *irsb, VEXLiftResult *lift_r);\nvoid execute_irsb(IRSB *irsb, VEXLiftResult *lift_r, VexArch guest, Bool load_from_ro_regions, Bool collect_data_refs, Bool const_prop);\nAddr get_value_from_const_expr(IRConst* con);\n"
  },
  {
    "path": "tests/test_arm_postprocess.py",
    "content": "import pyvex\n\n\n##########################\n### ARM Postprocessing ###\n##########################\ndef test_arm_postprocess_call():\n    for i in range(3):\n        # Thumb\n\n        # push  {r7}\n        # add   r7, sp, #0\n        # mov.w r1, #6\n        # mov   r0, pc\n        # add.w lr, r0, r1\n        # b.w   10408\n        irsb = pyvex.IRSB(\n            data=(b\"\\x80\\xb4\" b\"\\x00\\xaf\" b\"\\x4f\\xf0\\x06\\x01\" b\"\\x78\\x46\" b\"\\x00\\xeb\\x01\\x0e\" b\"\\xff\\xf7\\xec\\xbf\"),\n            mem_addr=0x1041F,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=6,\n            bytes_offset=1,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Call\"\n\n        # mov   lr, pc\n        # b.w   10408\n        irsb = pyvex.IRSB(\n            data=(b\"\\xfe\\x46\" b\"\\xe9\\xe7\"),\n            mem_addr=0x10431,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=2,\n            bytes_offset=1,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Call\"\n\n        # add   r2, pc, #0\n        # add.w lr, r2, #4\n        # ldr.w pc, [pc, #52]\n        irsb = pyvex.IRSB(\n            data=(b\"\\x00\\xa2\" b\"\\x02\\xf1\\x06\\x0e\" b\"\\xdf\\xf8\\x34\\xf0\"),\n            mem_addr=0x10435,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=3,\n            bytes_offset=1,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Call\"\n\n        # ldr   r0, [pc, #48]\n        # mov   r1, pc\n        # add.w r2, r1, #4\n        # add.w r3, r2, #4\n        # add.w r4, r3, #4\n        # add.w lr, r4, #4\n        # mov   pc, r0\n        irsb = pyvex.IRSB(\n            data=(\n                b\"\\x0c\\x48\"\n                b\"\\x79\\x46\"\n                b\"\\x01\\xf1\\x04\\x02\"\n                b\"\\x02\\xf1\\x04\\x03\"\n                b\"\\x03\\xf1\\x04\\x04\"\n                b\"\\x04\\xf1\\x04\\x0e\"\n                b\"\\x87\\x46\"\n            ),\n  
          mem_addr=0x1043F,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=7,\n            bytes_offset=1,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Call\"\n\n        # eor.w r0, r0, r0\n        # mov   lr, pc\n        # b.n   10460\n        irsb = pyvex.IRSB(\n            data=(b\"\\x80\\xea\\x00\\x00\" b\"\\x86\\x46\" b\"\\x01\\xe0\"),\n            mem_addr=0x10455,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=3,\n            bytes_offset=1,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Boring\"\n\n        # Thumb compiled with optimizations (gcc -O2)\n\n        # mov.w r1, #6\n        # mov   r0, pc\n        # add.w lr, r0, r1\n        # b.w   104bc\n        irsb = pyvex.IRSB(\n            data=(b\"\\x4f\\xf0\\x06\\x01\" b\"\\x78\\x46\" b\"\\x00\\xeb\\x01\\x0e\" b\"\\x00\\xf0\\xc5\\xb8\"),\n            mem_addr=0x10325,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=4,\n            bytes_offset=1,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Call\"\n\n        # ldr   r0, [pc, #56]\n        # mov   r1, pc\n        # add.w r2, r1, #4\n        # add.w r3, r2, #4\n        # add.w r4, r3, #4\n        # add.w lr, r4, #4\n        # mov   pc, r0\n        irsb = pyvex.IRSB(\n            data=(\n                b\"\\x0e\\x48\"\n                b\"\\x79\\x46\"\n                b\"\\x01\\xf1\\x04\\x02\"\n                b\"\\x02\\xf1\\x04\\x03\"\n                b\"\\x03\\xf1\\x04\\x04\"\n                b\"\\x04\\xf1\\x04\\x0e\"\n                b\"\\x87\\x46\"\n            ),\n            mem_addr=0x10333,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=7,\n            bytes_offset=1,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Call\"\n\n        # add   r2, pc, #0\n        # add.w lr, r2, #6\n        # ldr.w pc, [pc, #28]\n        irsb = pyvex.IRSB(\n            data=(b\"\\x00\\xa2\" 
b\"\\x02\\xf1\\x06\\x0e\" b\"\\xdf\\xf8\\x1c\\xf0\"),\n            mem_addr=0x10349,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=3,\n            bytes_offset=1,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Call\"\n\n        # mov   lr, pc\n        # b.w   104bc\n        irsb = pyvex.IRSB(\n            data=(b\"\\xfe\\x46\" b\"\\xb2\\xe0\"),\n            mem_addr=0x10353,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=2,\n            bytes_offset=1,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Call\"\n\n        # eor.w r0, r0, r0\n        # mov   lr, pc\n        # b.n   10362\n        irsb = pyvex.IRSB(\n            data=(b\"\\x80\\xea\\x00\\x00\" b\"\\x86\\x46\" b\"\\x01\\xe0\"),\n            mem_addr=0x10357,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=3,\n            bytes_offset=1,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Boring\"\n\n        # ARM compiled with optimizations (gcc -O2)\n\n        # mov   r1, #4\n        # mov   r0, pc\n        # add   lr, r0, r1\n        # ldr   pc, [pc, #56]\n        irsb = pyvex.IRSB(\n            data=(b\"\\x04\\x10\\xa0\\xe3\" b\"\\x0f\\x00\\xa0\\xe1\" b\"\\x01\\xe0\\x80\\xe0\" b\"\\x38\\xf0\\x9f\\xe5\"),\n            mem_addr=0x10298,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=4,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Call\"\n\n        # add   r1, pc, #0\n        # add   r2, r1, #4\n        # add   r3, r2, #4\n        # add   r4, r3, #4\n        # add   lr, r4, #4\n        # b     10414\n        irsb = pyvex.IRSB(\n            data=(\n                b\"\\x00\\x10\\x8f\\xe2\"\n                b\"\\x04\\x20\\x81\\xe2\"\n                b\"\\x04\\x30\\x82\\xe2\"\n                b\"\\x04\\x40\\x83\\xe2\"\n                b\"\\x04\\xe0\\x84\\xe2\"\n                b\"\\x54\\x00\\x00\\xea\"\n            ),\n            mem_addr=0x102A8,\n   
         arch=pyvex.ARCH_ARM_LE,\n            num_inst=6,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Call\"\n\n        # mov   lr, pc\n        # b     10414\n        irsb = pyvex.IRSB(\n            data=(b\"\\x0f\\xe0\\xa0\\xe1\" b\"\\x52\\x00\\x00\\xea\"),\n            mem_addr=0x102C0,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=2,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Call\"\n\n        # eor   r0, r0, r0\n        # mov   lr, r0\n        # b     102d8\n        irsb = pyvex.IRSB(\n            data=(b\"\\x00\\x00\\x20\\xe0\" b\"\\x00\\xe0\\xa0\\xe1\" b\"\\x00\\x00\\x00\\xea\"),\n            mem_addr=0x102C8,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=3,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Boring\"\n\n        # ARM\n\n        # push  {fp}\n        # add   fp, sp, #0\n        # mov   r1, #4\n        # mov   r0, pc\n        # add   lr, r0, r1\n        # ldr   pc, [pc, #68]\n        irsb = pyvex.IRSB(\n            data=(\n                b\"\\x04\\xb0\\x2d\\xe5\"\n                b\"\\x00\\xb0\\x8d\\xe2\"\n                b\"\\x04\\x10\\xa0\\xe3\"\n                b\"\\x0f\\x00\\xa0\\xe1\"\n                b\"\\x01\\xe0\\x80\\xe0\"\n                b\"\\x44\\xf0\\x9f\\xe5\"\n            ),\n            mem_addr=0x103E8,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=6,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Call\"\n\n        # add   r1, pc, #0\n        # add   r2, r1, #4\n        # add   r3, r2, #4\n        # add   r4, r3, #4\n        # add   lr, r4, #4\n        # b     103c4\n        irsb = pyvex.IRSB(\n            data=(\n                b\"\\x00\\x10\\x8f\\xe2\"\n                b\"\\x04\\x20\\x81\\xe2\"\n                b\"\\x04\\x30\\x82\\xe2\"\n                b\"\\x04\\x40\\x83\\xe2\"\n                b\"\\x04\\xe0\\x84\\xe2\"\n                b\"\\x54\\xff\\xff\\xea\"\n     
       ),\n            mem_addr=0x10400,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=6,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Call\"\n\n        # mov   lr, pc\n        # b     103c4\n        irsb = pyvex.IRSB(\n            data=(b\"\\x0f\\xe0\\xa0\\xe1\" b\"\\xe8\\xff\\xff\\xea\"),\n            mem_addr=0x10418,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=2,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Call\"\n\n        # eor   r0, r0, r0\n        # mov   lr, r0\n        # b     10430\n        irsb = pyvex.IRSB(\n            data=(b\"\\x00\\x00\\x20\\xe0\" b\"\\x00\\xe0\\xa0\\xe1\" b\"\\x00\\x00\\x00\\xea\"),\n            mem_addr=0x10420,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=3,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Boring\"\n\n        # From a \"real thing\" compiled with armc\n        # ARM:\n        #\n        irsb = pyvex.IRSB(\n            data=(\n                b\"H\\x10\\x9b\\xe5\"\n                b\"\\x0b\\x00\\xa0\\xe1\"\n                b\"\\x04 \\x91\\xe5\"\n                b\"\\x04\\xe0\\x8f\\xe2\"\n                b\"\\x01\\x10\\x82\\xe0\"\n                b\"\\x01\\xf0\\xa0\\xe1\"\n            ),\n            mem_addr=0x264B4C,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=6,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Call\"\n\n        # 400000  str     lr, [sp,#-0x4]!\n        # 400004  mov     r1, #0xa\n        # 400008  cmp     r0, r1\n        # 40000c  blne    #FunctionB\n        irsb = pyvex.IRSB(\n            data=bytes.fromhex(\"04e02de50a10a0e3010050e10100001b\"),\n            mem_addr=0x400000,\n            arch=pyvex.ARCH_ARM_LE,\n            num_inst=4,\n            opt_level=i,\n        )\n        assert len(irsb.exit_statements) == 1\n        assert irsb.exit_statements[0][2].jumpkind == \"Ijk_Call\"\n        assert irsb.jumpkind == 
\"Ijk_Boring\"\n\n\ndef test_arm_postprocess_ret():\n    for i in range(3):\n        # e91ba8f0\n        # ldmdb  R11, {R4,R11,SP,PC}\n        irsb = pyvex.IRSB(\n            data=b\"\\xe9\\x1b\\xa8\\xf0\",\n            mem_addr=0xED4028,\n            arch=pyvex.ARCH_ARM_BE_LE,\n            num_inst=1,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Ret\"\n\n        # e91badf0\n        # ldmdb  R11, {R4-R8,R10,R11,SP,PC}\n        irsb = pyvex.IRSB(\n            data=b\"\\xe9\\x1b\\xad\\xf0\",\n            mem_addr=0x4D4028,\n            arch=pyvex.ARCH_ARM_BE_LE,\n            num_inst=1,\n            opt_level=i,\n        )\n        assert irsb.jumpkind == \"Ijk_Ret\"\n\n        # 00a89de8\n        # ldmfd SP, {R11,SP,PC}\n        # Fixed by Fish in the VEX fork, commit 43c78f608490f9a5c71c7fca87c04759c1b93741\n        irsb = pyvex.IRSB(\n            data=b\"\\x00\\xa8\\x9d\\xe8\",\n            mem_addr=0xC800B57C,\n            arch=pyvex.ARCH_ARM_BE,\n            num_inst=1,\n            opt_level=1,\n        )\n        assert irsb.jumpkind == \"Ijk_Ret\"\n\n\nif __name__ == \"__main__\":\n    test_arm_postprocess_call()\n    test_arm_postprocess_ret()\n"
  },
  {
    "path": "tests/test_gym.py",
    "content": "# pylint: disable=missing-class-docstring\nimport unittest\n\nimport pyvex\n\n\nclass Tests(unittest.TestCase):\n    def test_x86_aam(self):\n        irsb = pyvex.lift(b\"\\xd4\\x0b\", 0, pyvex.ARCH_X86)\n        self.assertEqual(irsb.jumpkind, \"Ijk_Boring\")\n        self.assertEqual(irsb.size, 2)\n\n    def test_x86_aad(self):\n        irsb = pyvex.lift(b\"\\xd5\\x0b\", 0, pyvex.ARCH_X86)\n        self.assertEqual(irsb.jumpkind, \"Ijk_Boring\")\n        self.assertEqual(irsb.size, 2)\n\n    def test_x86_xgetbv(self):\n        irsb = pyvex.lift(b\"\\x0f\\x01\\xd0\", 0, pyvex.ARCH_X86)\n        self.assertEqual(irsb.jumpkind, \"Ijk_Boring\")\n        self.assertEqual(irsb.size, 3)\n\n    def test_x86_rdmsr(self):\n        irsb = pyvex.lift(b\"\\x0f\\x32\", 0, pyvex.ARCH_X86)\n        self.assertEqual(irsb.jumpkind, \"Ijk_Boring\")\n        self.assertEqual(irsb.size, 2)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/test_irsb_property_caching.py",
    "content": "# pylint: disable=missing-class-docstring,no-self-use\nimport unittest\n\nimport pyvex\n\n\nclass TestCacheInvalidationOnExtend(unittest.TestCase):\n    def test_cache_invalidation_on_extend(self):\n        b = pyvex.block.IRSB(b\"\\x50\", 0, pyvex.ARCH_X86)\n        assert b.size == 1\n        assert b.instructions == 1\n        toappend = pyvex.block.IRSB(b\"\\x51\", 0, pyvex.ARCH_X86)\n        toappend.jumpkind = \"Ijk_Invalid\"\n        toappend._direct_next = None  # Invalidate the cache because I manually changed the jumpkind\n        assert not toappend.direct_next\n        b.extend(toappend)\n        assert b.size == 2\n        assert b.instructions == 2\n        assert not b.direct_next\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/test_lift.py",
    "content": "import unittest\n\nimport pyvex\nfrom pyvex import IRSB, ffi, lift\nfrom pyvex.errors import PyVEXError\nfrom pyvex.lifting.util import GymratLifter, Instruction, JumpKind\n\n\n# pylint: disable=R0201\n# pylint: disable=C0115\nclass TestLift(unittest.TestCase):\n    def test_partial_lift(self):\n        \"\"\"This tests that gymrat correctly handles the case where an\n        instruction is longer than the remaining input.\n        \"\"\"\n\n        class NOP(Instruction):\n            name = \"nop\"\n            bin_format = \"0000111100001111\"\n\n            def compute_result(self, *args):\n                pass\n\n        class NOPLifter(GymratLifter):\n            instrs = [NOP]\n\n        lifter = NOPLifter(pyvex.ARCH_AMD64, 0)\n        # this should not throw an exception\n        block = lifter.lift(\"\\x0f\\x0fa\")\n        assert block.size == 2\n        assert block.instructions == 1\n        assert block.jumpkind == JumpKind.NoDecode\n\n    def test_skipstmts_toomanyexits(self):\n        # https://github.com/angr/pyvex/issues/153\n\n        old_exit_limit = IRSB.MAX_EXITS\n        IRSB.MAX_EXITS = 32\n\n        bytes_ = bytes.fromhex(\n            \"0DF1B00B2EAB94E8030008938BE803000DF1C0089AE8030083E\"\n            \"80300019B0DF1F00A339AE669E26193E8030085E8030098E803\"\n            \"0083E80300069B95E8030088E80300A26993E803004A9200236\"\n            \"3622362A361E362A36238AC029A069484E8030012AC09982993\"\n            \"28932B9303C885E8030092E8030084E803009AE8030082E8030\"\n            \"02A460A9D26993E910B9941910D9942910C992A93409548AD43\"\n            \"9194E803008AE8030027983F9927913F909BE803000DF5887B2\"\n            \"69335938BE803000DF58C7B089903C98BE8030098E8030084E8\"\n            \"030095E8030088E803004B993391329394E8030034933793369\"\n            \"3069C059B4C93049B4E9350ABCDF834C1CDF83CE185E8030094\"\n            \"E803004B9683E8030015A94498C4F7E2EA \"\n        )\n        arch = pyvex.ARCH_ARM_LE\n        # Lifting the first 
four bytes will not cause any problem. Statements should be skipped as expected\n        b = IRSB(bytes_[:34], 0xC6951, arch, opt_level=1, bytes_offset=5, skip_stmts=True)\n        assert len(b.exit_statements) > 0\n        assert not b.has_statements\n\n        # Lifting the entire block will cause the number of exit statements go\n        # beyond the limit (currently 32). PyVEX will\n        # automatically relift this block without skipping the statements\n        b = IRSB(bytes_, 0xC6951, arch, opt_level=1, bytes_offset=5, skip_stmts=True)\n        assert b.statements is not None\n        assert len(b.exit_statements) > 32\n\n        # Restore the setting\n        IRSB.MAX_EXITS = old_exit_limit\n\n    def test_max_bytes(self):\n        data = bytes.fromhex(\"909090909090c3\")\n        arch = pyvex.ARCH_X86\n        assert lift(data, 0x1000, arch, max_bytes=None).size == len(data)\n        assert lift(data, 0x1000, arch, max_bytes=len(data) - 1).size == len(data) - 1\n        assert lift(data, 0x1000, arch, max_bytes=len(data) + 1).size == len(data)\n\n        data2 = ffi.from_buffer(data)\n        self.assertRaises(PyVEXError, lift, data2, 0x1000, arch)\n        assert lift(data2, 0x1000, arch, max_bytes=len(data)).size == len(data)\n        assert lift(data2, 0x1000, arch, max_bytes=len(data) - 1).size == len(data) - 1\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/test_mips32_postprocess.py",
    "content": "import pyvex\n\n\ndef test_mips32_unconditional_jumps():\n    # 0040000c: 10000002 ; <input:28> beq $zero, $zero, LABEL_ELSE_IF\n    # 00400010: 00000000 ; <input:31> sll $zero, $zero, 0\n    # 00400014: 08100012 ; <input:34> j LABEL_DONE\n    # 00400018: <LABEL_ELSE_IF> ; <input:37> LABEL_ELSE_IF:\n    irsb = pyvex.IRSB(\n        data=(b\"\\x10\\x00\\x00\\x02\" b\"\\x00\\x00\\x00\\x00\"),\n        mem_addr=0x40000C,\n        arch=pyvex.ARCH_MIPS32_BE,\n        num_inst=2,\n        opt_level=0,\n    )\n    assert type(irsb.next) is pyvex.expr.Const\n    assert irsb.next.con.value == 0x400018\n\n\nif __name__ == \"__main__\":\n    test_mips32_unconditional_jumps()\n"
  },
  {
    "path": "tests/test_pyvex.py",
    "content": "import copy\nimport gc\nimport logging\nimport os\nimport random\nimport sys\nimport unittest\n\nimport pyvex\nfrom pyvex.lifting import LibVEXLifter\n\nif sys.platform == \"linux\":\n    import resource\n\n\n# pylint: disable=R0201\nclass TestPyvex(unittest.TestCase):\n    @unittest.skipUnless(\n        sys.platform == \"linux\", \"Cannot import the resource package on windows, values different on macos.\"\n    )\n    def test_memory(self):\n        arches = [pyvex.ARCH_X86, pyvex.ARCH_PPC32, pyvex.ARCH_AMD64, pyvex.ARCH_ARM_BE]\n        # we're not including ArchMIPS32 cause it segfaults sometimes\n\n        # disable logging, as that may fill up log buffers somewhere\n        logging.disable(logging.ERROR)\n\n        for _ in range(10000):\n            try:\n                s = os.urandom(32)\n                a = random.choice(arches)\n                p = pyvex.IRSB(data=s, mem_addr=0, arch=a)\n            except pyvex.PyVEXError:\n                pass\n\n        kb_start = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n\n        for _ in range(20000):\n            try:\n                s = os.urandom(32)\n                a = random.choice(arches)\n                p = pyvex.IRSB(data=s, mem_addr=0, arch=a)\n            except pyvex.PyVEXError:\n                pass\n        del p\n        gc.collect()\n\n        logging.disable(logging.NOTSET)\n\n        kb_end = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n\n        pyvex.pvc.clear_log()\n        pyvex.pvc.LibVEX_ShowAllocStats()\n        print(LibVEXLifter.get_vex_log())\n\n        # allow a 5mb leeway\n        assert kb_end - kb_start < 5000\n\n    ################\n    ### IRCallee ###\n    ################\n\n    def test_ircallee(self):\n        callee = pyvex.IRCallee(3, \"test_name\", 0xFFFFFF)\n        assert callee.name == \"test_name\"\n        assert callee.regparms == 3\n        assert callee.mcx_mask == 0xFFFFFF\n\n    ############\n    ### IRSB ###\n    ############\n\n 
   def test_irsb_empty(self):\n        self.assertRaises(Exception, pyvex.IRSB)\n        self.assertRaises(Exception, pyvex.IRSB, data=\"\", arch=pyvex.ARCH_AMD64, mem_addr=0)\n\n    def test_irsb_arm(self):\n        irsb = pyvex.IRSB(data=b\"\\x33\\xff\\x2f\\xe1\", mem_addr=0, arch=pyvex.ARCH_ARM_BE)\n        assert len([i for i in irsb.statements if isinstance(i, pyvex.IRStmt.IMark)]) == 1\n\n    def test_irsb_popret(self):\n        irsb = pyvex.IRSB(data=b\"\\x5d\\xc3\", mem_addr=0, arch=pyvex.ARCH_AMD64)\n        stmts = irsb.statements\n        irsb.pp()\n\n        assert len(stmts) > 0\n        assert irsb.jumpkind == \"Ijk_Ret\"\n        assert irsb.offsIP == 184\n\n        cursize = len(irsb.tyenv.types)\n        assert cursize > 0\n        print(irsb.statements[10].data)\n        print(irsb.statements[10].data.tmp)\n        print(irsb.tyenv.types[irsb.statements[10].data.tmp])\n        assert irsb.tyenv.lookup(irsb.statements[10].data.tmp) == \"Ity_I64\"\n\n    def test_two_irsb(self):\n        irsb1 = pyvex.IRSB(data=b\"\\x5d\\xc3\", mem_addr=0, arch=pyvex.ARCH_AMD64)\n        irsb2 = pyvex.IRSB(data=b\"\\x5d\\x5d\\x5d\\x5d\", mem_addr=0, arch=pyvex.ARCH_AMD64)\n\n        stmts1 = irsb1.statements\n        stmts2 = irsb2.statements\n\n        assert len(stmts1) != len(stmts2)\n\n    def test_irsb_deepCopy(self):\n        irsb = pyvex.IRSB(data=b\"\\x5d\\xc3\", mem_addr=0, arch=pyvex.ARCH_AMD64)\n        stmts = irsb.statements\n\n        irsb2 = copy.deepcopy(irsb)\n        stmts2 = irsb2.statements\n        assert len(stmts) == len(stmts2)\n\n    def test_irsb_addStmt(self):\n        irsb = pyvex.IRSB(data=b\"\\x5d\\xc3\", mem_addr=0, arch=pyvex.ARCH_AMD64)\n        stmts = irsb.statements\n\n        irsb2 = copy.deepcopy(irsb)\n        irsb2.statements = []\n        assert len(irsb2.statements) == 0\n\n        for n, i in enumerate(stmts):\n            assert len(irsb2.statements) == n\n            irsb2.statements.append(copy.deepcopy(i))\n\n        
irsb2.pp()\n\n    def test_irsb_tyenv(self):\n        irsb = pyvex.IRSB(data=b\"\\x5d\\xc3\", mem_addr=0, arch=pyvex.ARCH_AMD64)\n        print(irsb.tyenv)\n        print(\"Orig\")\n        print(irsb.tyenv)\n\n        print(\"Empty\")\n        irsb2 = pyvex.IRSB.empty_block(arch=pyvex.ARCH_AMD64, addr=0)\n        print(irsb2.tyenv)\n\n        print(\"Unwrapped\")\n        irsb2.tyenv = copy.deepcopy(irsb.tyenv)\n        print(irsb2.tyenv)\n\n    ##################\n    ### Statements ###\n    ##################\n\n    def test_irstmt_pp(self):\n        irsb = pyvex.IRSB(data=b\"\\x5d\\xc3\", mem_addr=0, arch=pyvex.ARCH_AMD64)\n        stmts = irsb.statements\n        for i in stmts:\n            print(\"STMT: \", end=\" \")\n            print(i)\n\n    def test_irstmt_flat(self):\n        print(\"TODO\")\n\n    def test_irstmt_imark(self):\n        m = pyvex.IRStmt.IMark(1, 2, 3)\n        assert m.tag == \"Ist_IMark\"\n        assert m.addr == 1\n        assert m.len == 2\n        assert m.delta == 3\n\n        m.addr = 5\n        assert m.addr == 5\n        m.len = 5\n        assert m.len == 5\n        m.delta = 5\n        assert m.delta == 5\n\n        self.assertRaises(Exception, pyvex.IRStmt.IMark, ())\n\n    def test_irstmt_abihint(self):\n        self.assertRaises(Exception, pyvex.IRStmt.AbiHint, ())\n\n        a = pyvex.IRExpr.RdTmp.get_instance(123)\n        b = pyvex.IRExpr.RdTmp.get_instance(456)\n\n        m = pyvex.IRStmt.AbiHint(a, 10, b)\n        assert m.base.tmp == 123\n        assert m.len == 10\n        assert m.nia.tmp == 456\n\n    def test_irstmt_put(self):\n        self.assertRaises(Exception, pyvex.IRStmt.Put, ())\n\n        a = pyvex.IRExpr.RdTmp.get_instance(123)\n        m = pyvex.IRStmt.Put(a, 10)\n        print(\"Put stmt:\", end=\" \")\n        print(m)\n        print(\"\")\n        assert m.data.tmp == 123\n        assert m.offset == 10\n\n    def test_irexpr_puti(self):\n        r = pyvex.IRRegArray(10, \"Ity_I64\", 20)\n        i = 
pyvex.IRExpr.RdTmp.get_instance(5)\n        d = pyvex.IRExpr.RdTmp.get_instance(30)\n        m = pyvex.IRStmt.PutI(r, i, d, 2)\n        assert m.descr.base == 10\n        assert m.ix.tmp == 5\n        assert m.bias == 2\n        assert m.data.tmp == d.tmp\n\n        self.assertRaises(Exception, pyvex.IRStmt.PutI, ())\n\n    def test_irstmt_wrtmp(self):\n        self.assertRaises(Exception, pyvex.IRStmt.WrTmp, ())\n\n        a = pyvex.IRExpr.RdTmp.get_instance(123)\n        m = pyvex.IRStmt.WrTmp(10, a)\n        assert m.tag == \"Ist_WrTmp\"\n        assert m.tmp == 10\n        assert m.data.tmp == 123\n\n    def test_irstmt_store(self):\n        self.assertRaises(Exception, pyvex.IRStmt.Store, ())\n\n        a = pyvex.IRExpr.RdTmp.get_instance(123)\n        d = pyvex.IRExpr.RdTmp.get_instance(456)\n        m = pyvex.IRStmt.Store(a, d, \"Iend_LE\")\n        assert m.tag == \"Ist_Store\"\n        assert m.endness == \"Iend_LE\"\n        assert m.addr.tmp == a.tmp\n        assert m.data.tmp == d.tmp\n\n    def test_irstmt_cas(self):\n        self.assertRaises(Exception, pyvex.IRStmt.CAS, ())\n\n        a = pyvex.IRExpr.RdTmp.get_instance(10)\n        eh = pyvex.IRExpr.RdTmp.get_instance(11)\n        el = pyvex.IRExpr.RdTmp.get_instance(12)\n        dh = pyvex.IRExpr.RdTmp.get_instance(21)\n        dl = pyvex.IRExpr.RdTmp.get_instance(22)\n\n        args = {\n            \"oldHi\": 1,\n            \"oldLo\": 2,\n            \"end\": \"Iend_LE\",\n            \"addr\": a,\n            \"expdHi\": eh,\n            \"expdLo\": el,\n            \"dataHi\": dh,\n            \"dataLo\": dl,\n        }\n\n        m = pyvex.IRStmt.CAS(**args)\n        assert m.tag == \"Ist_CAS\"\n        assert m.endness == \"Iend_LE\"\n        assert m.oldHi == 1\n        assert m.oldLo == 2\n        assert m.addr.tmp == a.tmp\n        assert m.expdHi.tmp == eh.tmp\n        assert m.expdLo.tmp == el.tmp\n        assert m.dataHi.tmp == dh.tmp\n        assert m.dataLo.tmp == dl.tmp\n\n    def 
test_irstmt_loadg(self):\n        self.assertRaises(Exception, pyvex.IRStmt.LoadG, ())\n\n        a = pyvex.IRExpr.RdTmp.get_instance(10)\n        alt = pyvex.IRExpr.RdTmp.get_instance(11)\n        guard = pyvex.IRExpr.RdTmp.get_instance(12)\n\n        args = {\n            \"dst\": 1,\n            \"end\": \"Iend_LE\",\n            \"addr\": a,\n            \"alt\": alt,\n            \"guard\": guard,\n            \"cvt\": \"ILGop_Ident32\",\n        }\n\n        m = pyvex.IRStmt.LoadG(**args)\n        assert m.tag == \"Ist_LoadG\"\n        assert m.end == \"Iend_LE\"\n        assert m.cvt == \"ILGop_Ident32\"\n        assert m.dst == 1\n        assert m.addr.tmp == a.tmp\n        assert m.alt.tmp == alt.tmp\n        assert m.guard.tmp == guard.tmp\n\n        assert m.cvt_types == (\"Ity_I32\", \"Ity_I32\")\n\n    def test_irstmt_storeg(self):\n        self.assertRaises(Exception, pyvex.IRStmt.LoadG, ())\n\n        a = pyvex.IRExpr.RdTmp.get_instance(10)\n        data = pyvex.IRExpr.RdTmp.get_instance(11)\n        guard = pyvex.IRExpr.RdTmp.get_instance(12)\n\n        args = {\"end\": \"Iend_LE\", \"addr\": a, \"data\": data, \"guard\": guard}\n\n        m = pyvex.IRStmt.StoreG(**args)\n        assert m.tag == \"Ist_StoreG\"\n        assert m.end == \"Iend_LE\"\n        assert m.addr.tmp == a.tmp\n        assert m.data.tmp == data.tmp\n        assert m.guard.tmp == guard.tmp\n\n    def test_irstmt_llsc(self):\n        self.assertRaises(Exception, pyvex.IRStmt.LLSC)\n\n        a = pyvex.IRExpr.RdTmp.get_instance(123)\n        d = pyvex.IRExpr.RdTmp.get_instance(456)\n        m = pyvex.IRStmt.LLSC(a, d, 1, \"Iend_LE\")\n        assert m.tag == \"Ist_LLSC\"\n        assert m.endness == \"Iend_LE\"\n        assert m.result == 1\n        assert m.addr.tmp == a.tmp\n        assert m.storedata.tmp == d.tmp\n\n    def test_irstmt_mbe(self):\n        m = pyvex.IRStmt.MBE(\"Imbe_CancelReservation\")\n        assert m.event == \"Imbe_CancelReservation\"\n        m.event = 
\"Imbe_Fence\"\n        assert m.event == \"Imbe_Fence\"\n\n    def test_irstmt_dirty(self):\n        args = [pyvex.IRExpr.RdTmp.get_instance(i) for i in range(10)]\n        m = pyvex.IRStmt.Dirty(\"test_dirty\", pyvex.IRConst.U8(1), args, 15, \"Ifx_None\", 0, 1, 0)\n        assert m.cee == \"test_dirty\"\n        assert isinstance(m.guard, pyvex.IRConst.U8)\n        assert m.tmp == 15\n        assert m.mFx == \"Ifx_None\"\n        assert m.nFxState == 0\n\n        for n, a in enumerate(m.args):\n            assert a.tmp == args[n].tmp\n\n    def test_irstmt_exit(self):\n        self.assertRaises(Exception, pyvex.IRStmt.Exit)\n\n        g = pyvex.IRExpr.RdTmp.get_instance(123)\n        d = pyvex.IRConst.U32(456)\n\n        m = pyvex.IRStmt.Exit(g, d, \"Ijk_Ret\", 10)\n        assert m.tag == \"Ist_Exit\"\n        assert m.jumpkind == \"Ijk_Ret\"\n        assert m.offsIP == 10\n        assert m.guard.tmp == g.tmp\n        assert m.dst.value == d.value\n\n    ##################\n    ### IRRegArray ###\n    ##################\n\n    def test_irregarray(self):\n        m = pyvex.IRRegArray(10, \"Ity_I64\", 20)\n\n        assert m.nElems == 20\n        assert m.elemTy == \"Ity_I64\"\n        assert m.base == 10\n\n    ################\n    ### IRConst.s ###\n    ################\n\n    def helper_const_subtype(self, subtype, tag, value):\n        print(\"Testing %s\" % tag)\n        self.assertRaises(Exception, subtype)\n\n        c = subtype(value)\n        assert c.tag == tag\n        assert c.value == value\n\n        d = subtype(value - 1)\n        e = subtype(value)\n        assert c.value == e.value\n        assert e.value == c.value\n        self.assertNotEqual(c.value, d.value)\n        self.assertNotEqual(d.value, c.value)\n        self.assertNotEqual(c.value, \"test\")\n\n        # TODO: actually check value\n        assert c.type == d.type\n\n    def test_irconst(self):\n        self.helper_const_subtype(pyvex.IRConst.U1, \"Ico_U1\", 1)\n        
self.helper_const_subtype(pyvex.IRConst.U8, \"Ico_U8\", 233)\n        self.helper_const_subtype(pyvex.IRConst.U16, \"Ico_U16\", 39852)\n        self.helper_const_subtype(pyvex.IRConst.U32, \"Ico_U32\", 3442312356)\n        self.helper_const_subtype(pyvex.IRConst.U64, \"Ico_U64\", 823452334523623455)\n        self.helper_const_subtype(pyvex.IRConst.F32, \"Ico_F32\", 13453.234375)\n        self.helper_const_subtype(pyvex.IRConst.F32i, \"Ico_F32i\", 3442312356)\n        self.helper_const_subtype(pyvex.IRConst.F64, \"Ico_F64\", 13453.234525)\n        self.helper_const_subtype(pyvex.IRConst.F64i, \"Ico_F64i\", 823457234523623455)\n        self.helper_const_subtype(pyvex.IRConst.V128, \"Ico_V128\", 39852)\n        self.helper_const_subtype(pyvex.IRConst.V256, \"Ico_V256\", 3442312356)\n\n    ###################\n    ### Expressions ###\n    ###################\n\n    def test_irexpr_binder(self):\n        # binder doesn't work statically, but hopefully we should\n        # never see it, anyways\n        return\n        # m = pyvex.IRExpr.Binder(1534252)\n        # assert m.binder == 1534252\n\n    def test_irexpr_geti(self):\n        r = pyvex.IRRegArray(10, \"Ity_I64\", 20)\n        i = pyvex.IRExpr.RdTmp.get_instance(5)\n        m = pyvex.IRExpr.GetI(r, i, 2)\n        assert m.description.base == 10\n        assert m.index.tmp == 5\n        assert m.bias == 2\n\n        self.assertRaises(Exception, pyvex.IRExpr.GetI)\n\n    def test_irexpr_rdtmp(self):\n        m = pyvex.IRExpr.RdTmp.get_instance(123)\n        assert m.tag == \"Iex_RdTmp\"\n        assert m.tmp == 123\n\n        irsb = pyvex.IRSB(b\"\\x90\\x5d\\xc3\", mem_addr=0x0, arch=pyvex.ARCH_AMD64)\n        print(\"TMP:\", irsb.next.tmp)\n\n    def test_irexpr_get(self):\n        m = pyvex.IRExpr.Get(0, \"Ity_I64\")\n        assert m.type == \"Ity_I64\"\n\n        self.assertRaises(Exception, pyvex.IRExpr.Get)\n\n    def test_irexpr_qop(self):\n        a = pyvex.IRExpr.Get(0, \"Ity_I64\")\n        b = 
pyvex.IRExpr.Get(184, \"Ity_I64\")\n        c = pyvex.IRExpr.RdTmp.get_instance(1)\n        d = pyvex.IRExpr.RdTmp.get_instance(2)\n        op = \"Iop_QAdd32S\"\n\n        m = pyvex.IRExpr.Qop(op, [a, b, c, d])\n\n        assert m.op == op\n        assert m.args[1].type == b.type\n\n        assert len(m.args) == 4\n        assert m.args[2].tmp == c.tmp\n\n    def test_irexpr_triop(self):\n        a = pyvex.IRExpr.Get(0, \"Ity_I64\")\n        b = pyvex.IRExpr.Get(184, \"Ity_I64\")\n        c = pyvex.IRExpr.RdTmp.get_instance(1)\n        op = \"Iop_MAddF64\"\n\n        m = pyvex.IRExpr.Triop(op, [a, b, c])\n\n        assert m.op == op\n        assert m.args[1].type == b.type\n\n        assert len(m.args) == 3\n        assert m.args[2].tmp == c.tmp\n\n    def test_irexpr_binop(self):\n        a = pyvex.IRExpr.Get(0, \"Ity_I64\")\n        c = pyvex.IRExpr.RdTmp.get_instance(1)\n        op = \"Iop_Add64\"\n\n        m = pyvex.IRExpr.Binop(op, [a, c])\n\n        assert m.op == op\n        assert m.args[1].tmp == c.tmp\n\n        assert len(m.args) == 2\n        assert m.args[1].tmp == c.tmp\n\n    def test_irexpr_unop(self):\n        a = pyvex.IRExpr.Get(0, \"Ity_I64\")\n        op = \"Iop_Add64\"\n\n        m = pyvex.IRExpr.Unop(op, [a])\n\n        assert m.op == op\n        assert len(m.args) == 1\n        assert m.args[0].offset == a.offset\n\n    def test_irexpr_load(self):\n        a = pyvex.IRExpr.Get(0, \"Ity_I64\")\n        e = \"Iend_LE\"\n        t = \"Ity_I64\"\n\n        m = pyvex.IRExpr.Load(e, t, a)\n\n        assert m.endness == e\n        assert m.type == t\n\n    def test_irexpr_const(self):\n        u1 = pyvex.IRConst.U1(1)\n        f64 = pyvex.IRConst.F64(1.123)\n\n        ue = pyvex.IRExpr.Const(u1)\n        _ = pyvex.IRExpr.Const(f64)\n\n        assert ue.con.value == u1.value\n        assert ue.con.value != f64.value\n\n    def test_irexpr_ite(self):\n        a = pyvex.IRExpr.Get(0, \"Ity_I64\")\n        iffalse = 
pyvex.IRExpr.RdTmp.get_instance(1)\n        iftrue = pyvex.IRExpr.Const(pyvex.IRConst.U8(200))\n\n        m = pyvex.IRExpr.ITE(a, iffalse, iftrue)\n\n        assert m.iftrue.con.value == iftrue.con.value\n\n    def test_irexpr_ccall(self):\n        callee = pyvex.IRCallee(3, \"test_name\", 0xFFFFFF)\n        args = [pyvex.IRExpr.RdTmp.get_instance(i) for i in range(10)]\n\n        m = pyvex.IRExpr.CCall(\"Ity_I64\", callee, args)\n\n        assert len(m.args) == len(args)\n        assert m.ret_type == \"Ity_I64\"\n\n        for n, a in enumerate(m.args):\n            assert a.tmp == args[n].tmp\n\n        m = pyvex.IRExpr.CCall(\"Ity_I64\", callee, ())\n        assert len(m.args) == 0\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tests/test_s390x_exrl.py",
    "content": "import pyvex\n\n\ndef test_s390x_exrl():\n    arch = pyvex.ARCH_S390X\n    irsb = pyvex.lift(\n        b\"\\xc6\\x10\\x00\\x00\\x00\\x04\"  # exrl %r1,0x400408\n        b\"\\x07\\xfe\"  # br %r14\n        b\"\\xd7\\x00\\x20\\x00\\x30\\x00\"  # xc 0(0,%r2),0(%r3)\n        b\"\\x7d\\xa7\",  # padding\n        0x400400,\n        arch,\n    )\n    irsb_str = str(irsb)\n\n    # check last_execute_target, only top 6 bytes are relevant\n    assert \"0xd700200030000000\" in irsb_str\n    assert \"s390x_dirtyhelper_EX\" in irsb_str\n    assert \"{ PUT(ia) = 0x400400; Ijk_Boring }\" in irsb_str\n    assert \"------ IMark(0x400406, 2, 0) ------\" in irsb_str\n    assert irsb.jumpkind == \"Ijk_Ret\"\n\n\nif __name__ == \"__main__\":\n    test_s390x_exrl()\n"
  },
  {
    "path": "tests/test_s390x_lochi.py",
    "content": "import pyvex\n\n\ndef test_s390x_lochi():\n    arch = pyvex.ARCH_S390X\n    irsb = pyvex.lift(b\"\\xec\\x18\\xab\\xcd\\x00\\x42\", 0x400400, arch)  # lochi %r1,0xabcd,8\n    irsb_str = str(irsb)\n\n    assert \"s390_calculate_cond(0x0000000000000008\" in irsb_str\n    assert \"PUT(r1_32) = 0xffffabcd\" in irsb_str\n    assert irsb.jumpkind in \"Ijk_Boring\"\n\n\nif __name__ == \"__main__\":\n    test_s390x_lochi()\n"
  },
  {
    "path": "tests/test_s390x_vl.py",
    "content": "#!/usr/bin/env python3\nimport pyvex\n\n\ndef test_s390x_vl():\n    arch = pyvex.ARCH_S390X\n    irsb = pyvex.lift(b\"\\xe7\\x40\\x90\\xa8\\x00\\x06\", 0x11C6C9E, arch)  # vl %v4, 0xa8(%r9)\n    irsb_str = str(irsb)\n\n    assert \"GET:I64(r9)\" in irsb_str\n    assert \"Add64(0x00000000000000a8\" in irsb_str\n    assert \"LDbe:V128\" in irsb_str\n    assert \"PUT(v4) =\" in irsb_str\n    assert irsb.jumpkind == \"Ijk_Boring\"\n\n\nif __name__ == \"__main__\":\n    test_s390x_vl()\n"
  },
  {
    "path": "tests/test_spotter.py",
    "content": "import os\n\nimport pyvex\nimport pyvex.lifting\nfrom pyvex.lifting import register\nfrom pyvex.lifting.util import GymratLifter, Instruction, Type\n\ntest_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"../../binaries/tests\"))\n\n\nclass Instruction_IMAGINARY(Instruction):\n    bin_format = bin(0x0F0B)[2:].zfill(16)\n    name = \"IMAGINARY\"\n\n    def compute_result(self):\n        a = self.constant(10, Type.int_27)\n        b = self.constant(20, Type.int_27)\n        a + b\n\n\nclass ImaginarySpotter(GymratLifter):\n    instrs = [Instruction_IMAGINARY]\n\n\nregister(ImaginarySpotter, \"X86\")\n\nbasic_goal = \"\"\"\nIRSB {\n   t0:Ity_I27\n\n   00 | ------ IMark(0x1, 2, 0) ------\n   01 | t0 = Add27((0xa :: Ity_I27),(0x14 :: Ity_I27))\n   NEXT: PUT(eip) = 0x00000003; Ijk_Boring\n}\n\"\"\"\n\n\ndef test_basic():\n    b = pyvex.block.IRSB(b\"\\x0f\\x0b\", 1, pyvex.ARCH_X86)\n    assert str(b).strip() == basic_goal.strip()\n\n\ndef test_embedded():\n    b = pyvex.block.IRSB(b\"\\x50\" * 3 + b\"\\x0f\\x0b\" + b\"\\x50\" * 6, 1, pyvex.ARCH_X86)\n    for i, stmt in enumerate(b.statements):\n        if type(stmt) is pyvex.stmt.IMark and stmt.addr == 0x4 and stmt.len == 2 and stmt.delta == 0:\n            imaginary_trans_stmt = b.statements[i + 1]\n            assert type(imaginary_trans_stmt) is pyvex.stmt.WrTmp\n            addexpr = imaginary_trans_stmt.data\n            assert type(addexpr) is pyvex.expr.Binop\n            assert addexpr.op == \"Iop_Add27\"\n            arg1, arg2 = addexpr.args\n            assert type(arg1) is pyvex.expr.Const\n            assert arg1.con.value == 10\n            assert type(arg2) is pyvex.expr.Const\n            assert arg2.con.value == 20\n            return\n    assert False, \"Could not find matching IMark\"\n\n\nclass Instruction_MSR(Instruction):\n    bin_format = bin(0x8808F380)[2:].zfill(32)\n    name = \"MSR.W\"\n\n    def compute_result(self):\n        a = self.constant(10, 
Type.int_27)\n        b = self.constant(20, Type.int_27)\n        a + b\n\n\nclass Instruction_CPSIEI(Instruction):\n    bin_format = bin(0xB662)[2:].zfill(16)\n    name = \"CPSIE I\"\n\n    def compute_result(self):\n        a = self.constant(10, Type.int_27)\n        b = self.constant(20, Type.int_27)\n        a + b\n\n\nclass Instruction_CPSIEF(Instruction):\n    bin_format = bin(0xB661)[2:].zfill(16)\n    name = \"CPSIE F\"\n\n    def compute_result(self):\n        a = self.constant(10, Type.int_27)\n        b = self.constant(20, Type.int_27)\n        a + b\n\n\nclass CortexSpotter(GymratLifter):\n    instrs = [Instruction_MSR, Instruction_CPSIEI, Instruction_CPSIEF]\n\n\nregister(CortexSpotter, \"ARMEL\")\n\n\ndef test_tmrs():\n    arch = pyvex.ARCH_ARM_LE\n    ins = b\"\\xef\\xf3\\x08\\x82\"\n    b = pyvex.block.IRSB(ins, 1, arch)\n    assert b.jumpkind == \"Ijk_Boring\"\n    assert isinstance(b.statements[1].data, pyvex.expr.Get)\n    assert arch.translate_register_name(b.statements[1].data.offset) in [\"sp\", \"r13\"]\n    assert isinstance(b.statements[2], pyvex.stmt.Put)\n\n\ndef test_tmsr():\n    arch = pyvex.ARCH_ARM_LE\n    inss = b\"\\x82\\xf3\\x08\\x88\"\n    b = pyvex.block.IRSB(inss, 1, arch, opt_level=3)\n    assert b.jumpkind == \"Ijk_Boring\"\n    assert isinstance(b.statements[1].data, pyvex.expr.Get)\n    assert arch.translate_register_name(b.statements[1].data.offset) == \"r2\"\n    assert isinstance(b.statements[2], pyvex.stmt.Put)\n\n\nif __name__ == \"__main__\":\n    test_basic()\n    test_embedded()\n    test_tmrs()\n    test_tmsr()\n"
  },
  {
    "path": "tests/test_ud2.py",
    "content": "import pyvex\n\n\ndef test_ud2():\n    # On x86 and amd64, ud2 is a valid 2-byte instruction that means \"undefined instruction\". Upon decoding a basic\n    # block that ends with ud2, we should treat it as an explicit NoDecode, instead of skipping the instruction and\n    # resume lifting.\n\n    b = pyvex.block.IRSB(b\"\\x90\\x90\\x0f\\x0b\\x90\\x90\", 0x20, pyvex.ARCH_AMD64)\n    assert b.jumpkind == \"Ijk_NoDecode\"\n    assert b.next.con.value == 0x22\n    assert b.size == 4\n\n\nif __name__ == \"__main__\":\n    test_ud2()\n"
  }
]