[
  {
    "path": ".gitattributes",
    "content": "* text=auto !eol svneol=native#text/plain\n*.gitattributes text svneol=native#text/plain\n\n# Scriptish formats\n*.bat        text svneol=native#text/plain\n*.bsh        text svneol=native#text/x-beanshell\n*.cgi        text svneol=native#text/plain\n*.cmd        text svneol=native#text/plain\n*.js         text svneol=native#text/javascript\n*.php        text svneol=native#text/x-php\n*.pl         text svneol=native#text/x-perl\n*.pm         text svneol=native#text/x-perl\n*.py         text svneol=native#text/x-python\n*.sh         eol=lf svneol=LF#text/x-sh\nconfigure    eol=lf svneol=LF#text/x-sh\n\n# Image formats\n*.bmp        binary svneol=unset#image/bmp\n*.gif        binary svneol=unset#image/gif\n*.ico        binary svneol=unset#image/ico\n*.jpeg       binary svneol=unset#image/jpeg\n*.jpg        binary svneol=unset#image/jpeg\n*.png        binary svneol=unset#image/png\n*.tif        binary svneol=unset#image/tiff\n*.tiff       binary svneol=unset#image/tiff\n*.svg        text svneol=native#image/svg%2Bxml\n\n# Data formats\n*.pdf        binary svneol=unset#application/pdf\n*.avi        binary svneol=unset#video/avi\n*.doc        binary svneol=unset#application/msword\n*.dsp        text svneol=crlf#text/plain\n*.dsw        text svneol=crlf#text/plain\n*.eps        binary svneol=unset#application/postscript\n*.json       text svneol=native#application/json\n*.gz         binary svneol=unset#application/gzip\n*.mov        binary svneol=unset#video/quicktime\n*.mp3        binary svneol=unset#audio/mpeg\n*.ppt        binary svneol=unset#application/vnd.ms-powerpoint\n*.ps         binary svneol=unset#application/postscript\n*.psd        binary svneol=unset#application/photoshop\n*.rdf        binary svneol=unset#text/rdf\n*.rss        text svneol=unset#text/xml\n*.rtf        binary svneol=unset#text/rtf\n*.sln        text svneol=native#text/plain\n*.swf        binary svneol=unset#application/x-shockwave-flash\n*.tgz        binary 
svneol=unset#application/gzip\n*.vcproj     text svneol=native#text/xml\n*.vcxproj    text svneol=native#text/xml\n*.vsprops    text svneol=native#text/xml\n*.wav        binary svneol=unset#audio/wav\n*.xls        binary svneol=unset#application/vnd.ms-excel\n*.zip        binary svneol=unset#application/zip\n\n# Text formats\n.htaccess    text svneol=native#text/plain\n*.bbk        text svneol=native#text/xml\n*.cmake      text svneol=native#text/plain\n*.css        text svneol=native#text/css\n*.dtd        text svneol=native#text/xml\n*.htm        text svneol=native#text/html\n*.html       text svneol=native#text/html\n*.ini        text svneol=native#text/plain\n*.log        text svneol=native#text/plain\n*.mak        text svneol=native#text/plain\n*.qbk        text svneol=native#text/plain\n*.rst        text svneol=native#text/plain\n*.sql        text svneol=native#text/x-sql\n*.txt        text svneol=native#text/plain\n*.xhtml      text svneol=native#text/xhtml%2Bxml\n*.xml        text svneol=native#text/xml\n*.xsd        text svneol=native#text/xml\n*.xsl        text svneol=native#text/xml\n*.xslt       text svneol=native#text/xml\n*.xul        text svneol=native#text/xul\n*.yml        text svneol=native#text/plain\nboost-no-inspect text svneol=native#text/plain\nCHANGES      text svneol=native#text/plain\nCOPYING      text svneol=native#text/plain\nINSTALL      text svneol=native#text/plain\nJamfile      text svneol=native#text/plain\nJamroot      text svneol=native#text/plain\nJamfile.v2   text svneol=native#text/plain\nJamrules     text svneol=native#text/plain\nMakefile*    text svneol=native#text/plain\nREADME       text svneol=native#text/plain\nTODO         text svneol=native#text/plain\n\n# Code formats\n*.c          text svneol=native#text/plain\n*.cpp        text svneol=native#text/plain\n*.h          text svneol=native#text/plain\n*.hpp        text svneol=native#text/plain\n*.ipp        text svneol=native#text/plain\n*.tpp        text 
svneol=native#text/plain\n*.jam        text svneol=native#text/plain\n*.java       text svneol=native#text/plain\n"
  },
  {
    "path": ".github/FUNDING.yml",
    "content": "github: ned14\n"
  },
  {
    "path": ".github/workflows/ci.yml",
    "content": "name: CI\n\non:\n  push:\n    branches:\n      - master\n  pull_request:\n\njobs:\n  Build:\n    runs-on: ubuntu-24.04\n    strategy:\n      matrix:\n        python-version: ['3.x', 'pypy3.11']\n        \n    steps:\n    - uses: actions/checkout@v5\n    - uses: actions/setup-python@v6\n      with:\n        python-version: ${{ matrix.python-version }}\n  \n    - shell: bash\n      run: |\n        pip install -r requirements.txt\n        git submodule update --init --recursive\n\n    - name: Build\n      shell: bash\n      run: |\n        python setup.py build\n\n  Test:\n    runs-on: ubuntu-24.04\n    strategy:\n      matrix:\n        python-version: ['3.x', 'pypy3.11']\n        \n    steps:\n    - uses: actions/checkout@v5\n    - uses: actions/setup-python@v6\n      with:\n        python-version: ${{ matrix.python-version }}\n    \n    - shell: bash\n      run: |\n        pip install -r requirements.txt\n        git submodule update --init --recursive\n\n    - name: Test\n      shell: bash\n      run: |\n        pip install pytest\n        python -m pytest tests/ -v\n\n  Install-Pip:\n    runs-on: ubuntu-24.04\n    strategy:\n      matrix:\n        python-version: ['3.x', 'pypy3.11']\n        \n    steps:\n    - uses: actions/checkout@v5\n    - uses: actions/setup-python@v6\n      with:\n        python-version: ${{ matrix.python-version }}\n    \n    - shell: bash\n      run: |\n        pip install -r requirements.txt\n        git submodule update --init --recursive\n\n    - name: Install with pip\n      shell: bash\n      run: |\n        python setup.py install\n        pcpp --version\n\n  Install-Uv:\n    runs-on: ubuntu-24.04\n    strategy:\n      matrix:\n        python-version: ['3.x', 'pypy3.11']\n        \n    steps:\n    - uses: actions/checkout@v5\n    - uses: actions/setup-python@v6\n      with:\n        python-version: ${{ matrix.python-version }}\n    \n    - name: Install uv\n      shell: bash\n      run: |\n        curl -LsSf 
https://astral.sh/uv/install.sh | sh\n        echo \"$HOME/.cargo/bin\" >> $GITHUB_PATH\n    \n    - shell: bash\n      run: |\n        git submodule update --init --recursive\n\n    - name: Install with uv\n      shell: bash\n      run: |\n        uv venv test_env\n        source test_env/bin/activate\n        uv pip install -e .\n        pcpp --version\n\n  Test-Uv:\n    runs-on: ubuntu-24.04\n    strategy:\n      matrix:\n        python-version: ['3.x', 'pypy3.11']\n        \n    steps:\n    - uses: actions/checkout@v5\n    - uses: actions/setup-python@v6\n      with:\n        python-version: ${{ matrix.python-version }}\n    \n    - name: Install uv\n      shell: bash\n      run: |\n        curl -LsSf https://astral.sh/uv/install.sh | sh\n        echo \"$HOME/.cargo/bin\" >> $GITHUB_PATH\n    \n    - shell: bash\n      run: |\n        git submodule update --init --recursive\n        \n    - name: Install dependencies and pcpp with uv\n      shell: bash\n      run: |\n        uv venv test_env\n        source test_env/bin/activate\n        uv pip install -e .\n        uv pip install pytest\n        python -m pytest tests/ -v\n"
  },
  {
    "path": ".gitignore",
    "content": "*.pyc\nbuild/*\ndist/*\npcpp.egg-info/*\n"
  },
  {
    "path": ".gitmodules",
    "content": "[submodule \"pcpp/ply\"]\n\tpath = pcpp/ply\n\turl = https://github.com/ned14/ply.git\n\tbranch = master\n\tignore = untracked\n"
  },
  {
    "path": "AGENTS.md",
    "content": "# Agent Overview for pcpp (Pure Python C Preprocessor)\n\n## How to build\n- `python setup.py build`\n\n## How to test\n- `python setup.py test`\n\n## Instructions\n1. Ignore everything within the `ply` submodule.\n2. Run tests before making a change.\n3. Run tests after making a change.\n4. If writing a unit test, always use the `unittest` framework.\n\n"
  },
  {
    "path": "LICENSE.txt",
    "content": "(C) 2018-2026 Niall Douglas http://www.nedproductions.biz/\nand (C) 2007-2019 David Beazley http://www.dabeaz.com/\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n* Redistributions of source code must retain the above copyright notice,\n  this list of conditions and the following disclaimer.  \n* Redistributions in binary form must reproduce the above copyright notice, \n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.  \n* Neither the name of the David Beazley or Dabeaz LLC may be used to\n  endorse or promote products derived from this software without\n  specific prior written permission. \n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "README.rst",
    "content": "A C99 preprocessor written in pure Python\n=========================================\n.. role:: c(code)\n   :language: c\n\n.. |travis| image:: https://github.com/ned14/pcpp/actions/workflows/ci.yml/badge.svg?branch=master\n     :align: middle\n     :target: https://github.com/ned14/pcpp/actions\n\n\\(C) 2018-2026 Niall Douglas http://www.nedproductions.biz/ and (C) 2007-2020 David Beazley http://www.dabeaz.com/\n\nPyPI: https://pypi.python.org/pypi/pcpp Github: https://github.com/ned14/pcpp API reference docs: https://ned14.github.io/pcpp/\n\nTravis master branch all tests passing for Python 3 and PyPy3: |travis|\n\nA pure universal Python C (pre-)preprocessor implementation very useful for pre-preprocessing header only\nC++ libraries into single file includes and other such build or packaging stage malarky.\nThe implementation can be used as a Python module (`see API reference <https://ned14.github.io/pcpp/>`_)\nor as a command line tool ``pcpp`` which\ncan stand in for a conventional C preprocessor (i.e. it'll accept similar arguments).\nWorks great under PyPy, and you can expect performance gains of between 0.84x and 2.62x\n(average = 2.2x, median = 2.31x).\n\nTo install pcpp, you can use either pip or uv:\n\nUsing pip:\n::\n\n    pip install pcpp\n\nUsing uv (faster installation):\n::\n\n    uv install pcpp\n\n\nYour includes can be benchmarked for heft in order to improve your build times! See\nthe ``--times`` and ``--filetimes`` options, and you can see graphs from pcpp for the\nC++ STLs at https://github.com/ned14/stl-header-heft.\n\nA very unique facility of this C preprocessor is *partial* preprocessing so you can\nprogrammatically control how much preprocessing is done by ``pcpp`` and how much is\ndone by the C or C++ compiler's preprocessor. 
The ultimate control is by subclassing\nthe :c:`Preprocessor` class in Python from which you can do anything you like, however\nfor your convenience the ``pcpp`` command line tool comes with the following canned\npartial preprocessing algorithms:\n\n**passthru-defines**\n  Pass through but still execute #defines and #undefs if not always removed by\n  preprocessor logic. This ensures that including the output sets exactly the same\n  macros as if you included the original, plus include guards work.\n\n**passthru-unfound-includes**\n  If an :c:`#include` is not found, pass it through unmodified. This is very useful\n  for passing through includes of system headers.\n\n**passthru-undefined-exprs**\n  This is one of the most powerful pass through algorithms. If an expression passed to\n  :c:`#if` (or its brethern) contains an unknown macro, expand the expression with\n  known macros and pass through *unexecuted*, and then pass through the remaining block.\n  Each :c:`#elif` is evaluated in turn and if it does not contain unknown macros, it will be\n  executed immediately. Finally, any :c:`#else` clause is always passed through *unexecuted*.\n  Note that include guards normally defeat this algorithm, so those are specially detected and\n  ignored.\n\n**passthru-comments**\n  A major use case for ``pcpp`` is as a preprocessor for the `doxygen <http://www.stack.nl/~dimitri/doxygen/>`_\n  reference documentation tool whose preprocessor is unable to handle any preprocessing\n  of any complexity. 
``pcpp`` can partially execute the preprocessing which doxygen\n  is incapable of, thus generating output which produces good results with doxygen.\n  Hence the ability to pass through comments containing doxygen markup is very useful.\n\n**passthru-magic-macros**\n  Don't expand ``__DATE__``, ``__TIME__``, ``__FILE__``, ``__LINE__`` nor ``__COUNTER__``.\n\n**passthru-includes**\n  Don't expand those ``#include`` whose arguments match the supplied regular expression\n  into the output, however still execute those includes. This lets you generate output\n  with macros from nested includes expanded, however those ``#include`` matching\n  the regular expression are passed through into the output.\n\n\nStandards (non-)compliance\n--------------------------\n``pcpp`` passes a very slightly modified edition of the `mcpp <http://mcpp.sourceforge.net/>`_\nunit test suite. The only modifications done were to disable the digraph and trigraphs tests.\nIt also passes the list of \"preprocessor torture\" expansion fragments\nin the C11 standard, correctly expanding some very complex recursive macro expansions\nwhere expansions cause new macro expansions to be formed. 
In this, it handily beats\nthe MSVC preprocessor and ought to handle most C99 preprocessor metaprogramming.\nIf you compare its output side-by-side to that of GCC or clang's preprocessor, results\nare extremely close indeed with blank line collapsing being the only difference.\n\nAs of v1.30 (Oct 2020), a proper yacc based expression evaluator for :c:`#if`\nexpressions is used which is standards conforming, and fixes a large number of\nproblems found in the previous Python :c:`eval()` based expression evaluator.\n\nA full, detailed list of known non-conformance with the C99 standard is below.\nPull requests with bug fixes and new unit tests for the fix are welcome.\n\nOn Python 3, input and output files can have your choice of encoding, and you can\nhook file open to inspect the encoding using ``chardet``.\n\nNote that most of this preprocessor was written originally by David Beazley to show\noff his excellent Python Lex-Yacc library PLY (http://www.dabeaz.com/ply/) and is\nhidden in there without being at all obvious given the number of Stack Overflow\nquestions which have asked for a pure Python C preprocessor implementation. This\nimplementation fixes a lot of conformance bugs (the original was never intended to\nrigidly adhere to the C standard) and adds in a test suite based on the C11 preprocessor\ntorture samples plus the mcpp preprocessor test suite. 
Still, this project would\nnot be possible without David's work, so please take off your hat and give a bow towards him.\n\nCommand line tool ``pcpp``:\n---------------------------\nThe help from the command line tool ``pcpp``::\n\n    usage: pcpp [-h] [-o [path]] [-D macro[=val]] [-U macro] [-N macro] [-I path]\n                [--passthru-defines] [--passthru-unfound-includes]\n                [--passthru-unknown-exprs] [--passthru-comments]\n                [--passthru-magic-macros] [--passthru-includes <regex>]\n                [--disable-auto-pragma-once] [--line-directive [form]] [--debug]\n                [--time] [--filetimes [path]] [--compress]\n                [--assume-input-encoding <encoding>]\n                [--output-encoding <encoding>] [--write-bom] [--version]\n                [input [input ...]]\n\n    A pure universal Python C (pre-)preprocessor implementation very useful for\n    pre-preprocessing header only C++ libraries into single file includes and\n    other such build or packaging stage malarky.\n\n    positional arguments:\n      input                 Files to preprocess (use '-' for stdin)\n\n    optional arguments:\n      -h, --help            show this help message and exit\n      -o [path]             Output to a file instead of stdout\n      -D macro[=val]        Predefine name as a macro [with value]\n      -U macro              Pre-undefine name as a macro\n      -N macro              Never define name as a macro, even if defined during\n                            the preprocessing.\n      -I path               Path to search for unfound #include's\n      --passthru-defines    Pass through but still execute #defines and #undefs if\n                            not always removed by preprocessor logic\n      --passthru-unfound-includes\n                            Pass through #includes not found without execution\n      --passthru-unknown-exprs\n                            Unknown macros in expressions cause preprocessor logic\n 
                           to be passed through instead of executed by treating\n                            unknown macros as 0L\n      --passthru-comments   Pass through comments unmodified\n      --passthru-magic-macros\n                            Pass through double underscore magic macros unmodified\n      --passthru-includes <regex>\n                            Regular expression for which #includes to not expand.\n                            #includes, if found, are always executed\n      --disable-auto-pragma-once\n                            Disable the heuristics which auto apply #pragma once\n                            to #include files wholly wrapped in an obvious include\n                            guard macro\n      --line-directive [form]\n                            Form of line directive to use, defaults to #line,\n                            specify nothing to disable output of line directives\n      --debug               Generate a pcpp_debug.log file logging execution\n      --time                Print the time it took to #include each file\n      --filetimes [path]    Write CSV file with time spent inside each included\n                            file, inclusive and exclusive\n      --compress            Make output as small as possible\n      --assume-input-encoding <encoding>\n                            The text encoding to assume inputs are in\n      --output-encoding <encoding>\n                            The text encoding to use when writing files\n      --write-bom           Prefix any output with a Unicode BOM\n      --version             show program's version number and exit\n\n    Note that so pcpp can stand in for other preprocessor tooling, it ignores any\n    arguments it does not understand.\n\nQuick demo of pass through mode\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nLet us look at an example for pass through mode. Here is the original:\n\n.. 
code-block:: c\n\n    #if !defined(__cpp_constexpr)\n    #if __cplusplus >= 201402L\n    #define __cpp_constexpr 201304  // relaxed constexpr\n    #else\n    #define __cpp_constexpr 190000\n    #endif\n    #endif\n    #ifndef BOOSTLITE_CONSTEXPR\n    #if __cpp_constexpr >= 201304\n    #define BOOSTLITE_CONSTEXPR constexpr\n    #endif\n    #endif\n    #ifndef BOOSTLITE_CONSTEXPR\n    #define BOOSTLITE_CONSTEXPR\n    #endif\n\n``pcpp test.h --passthru-defines --passthru-unknown-exprs`` will output:\n\n.. code-block:: c\n\n    #if !defined(__cpp_constexpr)\n    #if __cplusplus >= 201402\n    #define __cpp_constexpr 201304\n    #else\n    #define __cpp_constexpr 190000\n    #endif\n    #endif\n    #ifndef BOOSTLITE_CONSTEXPR\n    #if __cpp_constexpr >= 201304\n    #define BOOSTLITE_CONSTEXPR constexpr\n    #endif\n    #endif\n    #ifndef BOOSTLITE_CONSTEXPR\n    #define BOOSTLITE_CONSTEXPR\n    #endif\n\nThis is because ``__cpp_constexpr`` was not defined, so because of the ``--passthru-unknown-exprs`` flag\nwe pass through everything inside that if block **unexecuted** i.e. defines and undefs are NOT executed by\n``pcpp``. Let's define ``__cpp_constexpr``:\n\n``pcpp test.h --passthru-defines --passthru-unknown-exprs -D __cpp_constexpr``\n\n.. code-block:: c\n\n    #line 8 \"test.h\"\n    #ifndef BOOSTLITE_CONSTEXPR\n\n\n\n    #endif\n    #ifndef BOOSTLITE_CONSTEXPR\n    #define BOOSTLITE_CONSTEXPR\n    #endif\n\nSo, big difference now. We execute the entire first if block as ``__cpp_constexpr`` is now defined, thus\nleaving whitespace. Let's try setting ``__cpp_constexpr`` a bit higher:\n\n``pcpp test.h --passthru-defines --passthru-unknown-exprs -D __cpp_constexpr=201304``\n\n.. 
code-block:: c\n\n    #line 8 \"test.h\"\n    #ifndef BOOSTLITE_CONSTEXPR\n\n    #define BOOSTLITE_CONSTEXPR constexpr\n\n    #endif\n\nAs you can see, the lines related to the known ``__cpp_constexpr`` are executed and removed, passing through\nany if blocks with unknown macros in the expression.\n\nWhat if you want a macro to be known but undefined? The -U (to undefine) flag has an obvious meaning in pass\nthrough mode in that it makes a macro no longer unknown, but known to be undefined.\n\n``pcpp test.h --passthru-defines --passthru-unknown-exprs -U __cpp_constexpr``\n\n.. code-block:: c\n\n    #if __cplusplus >= 201402\n    #define __cpp_constexpr 201304\n    #else\n    #define __cpp_constexpr 190000\n    #endif\n\n    #ifndef BOOSTLITE_CONSTEXPR\n\n\n\n    #endif\n    #ifndef BOOSTLITE_CONSTEXPR\n    #define BOOSTLITE_CONSTEXPR\n    #endif\n\nHere ``__cpp_constexpr`` is known to be undefined so the first clause executes, but ``__cplusplus`` is\nunknown so that entire block is passed through unexecuted. In the next test comparing ``__cpp_constexpr``\nto 201304 it is still known to be undefined, and so 0 >= 201304 is the expressions tested which is false,\nhence the following stanza is removed entirely.\n\nHelping ``pcpp`` using source code annotation\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nYou can achieve a great deal using -D (define), -U (undefine) and -N (never define) on the command line,\nbut for more complex preprocessing it gets hard to pass through the correct logic without some source code\nannotation.\n\n``pcpp`` lets you annotate which part of an if block being passed through due to use of unknown macros\nto also be executed in addition to the pass through. For this use ``__PCPP_ALWAYS_FALSE__`` or\n``__PCPP_ALWAYS_TRUE__`` which tells ``pcpp`` to temporarily start executing the passed through\npreprocessor commands e.g.\n\n.. 
code-block:: c\n\n    #if !defined(__cpp_constexpr)\n    #if __cplusplus >= 201402L\n    #define __cpp_constexpr 201304\n    #elif !__PCPP_ALWAYS_FALSE__     // pcpp please execute this next block\n    #define __cpp_constexpr 190000\n    #endif\n    #endif\n    #ifndef BOOSTLITE_CONSTEXPR\n    #if __cpp_constexpr >= 201304\n    #define BOOSTLITE_CONSTEXPR constexpr\n    #endif\n    #endif\n    #ifndef BOOSTLITE_CONSTEXPR\n    #define BOOSTLITE_CONSTEXPR\n    #endif\n\nNote that ``__PCPP_ALWAYS_FALSE__`` will always be false in any other preprocessor, and it is also\nfalse in ``pcpp``. However, it causes ``pcpp`` to execute the define of ``__cpp_constexpr`` to 190000:\n\n``pcpp test.h --passthru-defines --passthru-unknown-exprs``\n\n.. code-block:: c\n\n    #if !defined(__cpp_constexpr)\n    #if __cplusplus >= 201402\n    #define __cpp_constexpr 201304\n    #elif 1\n    #define __cpp_constexpr 190000\n    #endif\n    #endif\n    #ifndef BOOSTLITE_CONSTEXPR\n\n\n\n    #endif\n    #ifndef BOOSTLITE_CONSTEXPR\n    #define BOOSTLITE_CONSTEXPR\n    #endif\n\nThis is one way of marking up ``#else`` clauses so they always execute in a normal preprocessor\nand also pass through with execution with ``pcpp``. You can, of course, also place ``|| __PCPP_ALWAYS_FALSE__``\nin any ``#if`` stanza to cause it to be passed through with execution, but not affect the\npreprocessing logic otherwise.\n\nWhat's implemented by the ``Preprocessor`` class:\n=================================================\n- Digraphs and Trigraphs\n- line continuation operator '``\\``'\n- C99 correct elimination of comments and maintenance of whitespace in output.\n- :c:`__DATE__`, :c:`__TIME__`, :c:`__FILE__`, :c:`__LINE__`. 
Note that :c:`__STDC__` et al are NOT defined by\n  default, you need to define those manually before starting preprocessing.\n- :c:`__COUNTER__`, a very common extension\n- Object :c:`#define`\n- Function :c:`#define macro(...)`\n\n  - Retokenisation and reexpansion after expansion is C99 compliant.\n\n- :c:`#undef`\n- :c:`#include \"path\"`, :c:`<path>` and :c:`PATH`\n- :c:`defined` operator\n- C operators:\n\n  - :c:`+, -, !, ~`\n  - :c:`*, /, %`\n  - :c:`+, -`\n  - :c:`<<, >>`\n  - :c:`<, <=, >, >=`\n  - :c:`==, !=`\n  - :c:`&`\n  - :c:`^`\n  - :c:`|`\n  - :c:`&&`\n  - :c:`||`\n  - :c:`x ? y : z` (partial support, see known bugs)\n\n- :c:`#if`, :c:`#ifdef`, :c:`#ifndef`, :c:`#elif`, :c:`#else`, :c:`#endif`\n- Stringizing operator #\n- Token pasting operator ##\n- :c:`#pragma once`, a very common extension\n\nAdditionally implemented by ``pcpp`` command line tool:\n-------------------------------------------------------\n- :c:`#error` (default implementation prints to stderr and increments the exit code)\n- :c:`#warning` (default implementation prints to stderr)\n\nNot implemented yet (donations of code welcome):\n------------------------------------------------\n- :c:`#pragma` anything other than :c:`once`.\n- :c:`_Pragma` used to emit preprocessor calculated #pragma.\n- :c:`#line num`, :c:`num \"file\"` and :c:`NUMBER FILE`.\n\nKnown bugs (ordered from worst to least worst):\n-----------------------------------------------\nNone presently known.\n\nCustomising your own preprocessor:\n==================================\nSee the API reference docs at https://ned14.github.io/pcpp/\n\nYou can find an example of overriding the ``on_*()`` processing hooks at https://github.com/ned14/pcpp/blob/master/pcpp/pcmd.py\n\nRunning Tests\n=============\nTo run the test suite for ``pcpp``, you can use either of these methods:\n\n1. Using pytest directly (recommended):\n   ::\n\n       python -m pytest tests/ -v\n\n2. 
Using the setup.py test command (deprecated but still functional):\n   ::\n\n       python setup.py test\n\nThe test suite includes various test cases covering C99 preprocessor functionality,\nedge cases, and compatibility with the C11 standard preprocessor torture samples.\n\n\nHistory:\n========\nv1.31 (?):\n----------\n- Remove Python 2 support completely; pcpp is now Python 3 only\n  (issue #87).\n- Replace setuptools test suite with pytest as the test runner.\n- Add ``uv`` support for faster dependency installation.\n- Rearrange ``main()`` function logic to avoid code duplication and make the\n  entry point cleaner (PR #73). Thanks to assarbad for this improvement.\n- Fix issue #79 by replacing ``CPP_INTEGER`` and ``CPP_FLOAT`` tokens with a\n  ``PP_NUMBER`` token for better preprocessing compliance. Update ``PP_NUMBER``\n  regex definition to properly handle digit separators in numeric literals.\n  Add new test file for issue0079. Thanks to willwray for the PR implementing\n  these features.\n- Add support for ``#include_next``, though note it is gated behind the\n``--enable-include-next`` command line option. Thanks to Dudeldu for the original\nPR #98.\n- Multi line and unicode character literals were not working by pure oversight.\nFixed and thanks to geky for showing the issue in PR #103.\n- Add support for ``__has_include``, a long requested and oft requested\nfeature (#53, #77, #97).\n- Disable the processing of trigraphs by default to match other C preprocessors.\nNow pass ``--trigraphs`` to enable them. 
Thanks to pmp-p for suggesting this #100.\n- Believe it or not, until now this caused an infinite loop:\n\n```\n#define FOO(x) x\n#define BAR FOO(BAR)\nBAR\n```\n\nThis is fixed, which closes #72, #101 and possibly quite a few more open issues.\nThanks to MatthewShao for originally reporting this.\n\nv1.30 (29th October 2021):\n--------------------------\n- Thanks to a 5km limit covid lockdown in my country, a public holiday where we were\n  supposed to be away meant I was stuck at home instead. I took the full day to finish\n  the https://github.com/ned14/pcpp/tree/yacc_expression_evaluator branch which is a\n  proper C preprocessor expression evaluator based on http://www.dabeaz.com/ply/ 's\n  yacc module. This was a very long outstanding piece of work which had been in\n  progress for nearly two years. It just needed a full day of my time to get it done,\n  and now it is indeed done at long last.\n- BREAKING CHANGE: Thanks to the new expression evaluator, fix a long standing bug\n  where unknown function macros in expressions were parsed as ``0(0)`` which obviously\n  enough does not work. Fixing this changes how the ``on_unknown_macro_in_expr()``\n  hook works, and there is now an added ``on_unknown_macro_function_in_expr()`` hook.\n- Add a new passthru option ``--passthru-includes`` which enables selected ``#include``\n  to be passed through, in addition to being executed. Thanks to schra for suggesting\n  this, including a PR. The original implementation had some subtle corner case bugs,\n  thanks to trelau for reporting those.\n- Fix a token expansion ordering bug whereby if a function macro used the same\n  macro in more than one argument, expansion in one argument evaluation caused overly\n  eager expansion in later argument evaluations. This fix ought to fix pcpp's ability\n  to parse Boost (untested). 
Thanks to joaquintides for reporting this.\n- Now that pcpp no longer ever calls ``eval()``, pcpp is PyPy compatible and is\n  probably also compatible with Pyston (untested). Typical speedup is about 2.2x-2.3x,\n  though it can also be slower occasionally for some inputs. PyPy compatibility is now\n  being tested by CI to ensure it remains working going forth.\n- Fix internal preprocessor error and failure to insert newlines before ``#include``\n  caused by certain sequence of line continuations in a macro. Thanks to dslijepcevic\n  for reporting this.\n\nv1.22 (19th October 2020):\n--------------------------\n- Fix bug where outputting to stdout did not combine with anything which\n  printed to stdout. Thanks to Fondesa for reporting this.\n- Fix extra newlines being inserted after a multiline comment. Thanks to virtuald\n  for sending a PR fixing this.\n- Fix not being able to actually specify an empty line directive. Thanks to kuri65536\n  for sending a PR fixing this.\n- Update ply submodule to latest from trunk.\n- Emit line continuations as tokens, rather than collapsing lines during parsing.\n  Thanks to MathieuDuponchelle for the pull request implementing this.\n- Enable parsing and emission of files in arbitrary text encodings. This is supported\n  in Python 3 or later only. Thanks to MathieuDuponchelle for the suggestion.\n- Fix bad regex for parsing floats, so now floats are correctly tokenised. Thanks\n  to LynnKirby for reporting this.\n- BREAKING CHANGE: Passthrough for ``#include MACRO`` was not supported. This was not\n  intentional, and to fix it required modifying the ``on_include_not_found()``\n  customisation point which is a source breaking change. Thanks to schra for reporting this.\n\nv1.21 (30th September 2019):\n----------------------------\n- Fix bug where token pasting two numeric tokens did not yield a numeric token. 
Thanks\n  to Sei-Lisa for reporting this.\n- BREAKING CHANGE: Paths emitted by pcpp into ``#line`` directives now are relative to the\n  working directory of the process when ``Preprocessor`` is initialised. This includes\n  added search paths - files included from those locations will be emitted with a sequence\n  of ``../`` to relativise the path emitted. If no path exists between the working\n  directory and the path of the file being emitted, an absolute path is emitted instead.\n\n  If you wish to disable this new behaviour, or use different behaviour, you can\n  customise the new `rewrite_paths` member variable of ``Preprocessor``.\n- Fix bug where ``__LINE__`` was expanding into the line number of its definition instead\n  of its use. Thanks to Sei-Lisa for reporting this.\n- Add ``--passthru-magic-macros`` command line option.\n- BREAKING CHANGE: The ``PreprocessorHooks`` and ``OutputDirective`` interface has\n  changed. One now must specify the kind of ``OutputDirective`` abort one wants, and one\n  can now both ignore AND remove directives. ``on_directive_handle()`` and\n  ``on_directive_unknown()`` now take an extra parameter ``precedingtoks``, these are the\n  tokens from the ``#`` up to the directive.\n- Fix a corner case where ``FUNC(void)foo()`` expanded to ``voidfoo()`` and not\n  ``void foo()`` which is a very common non-conforming extension of the C preprocessor.\n  Thanks to OmegaDoom for reporting this.\n- Add tokens for all the C operators, to help implementation of an expression evaluator.\n- Updated embedded ply to HEAD (2019-04-25)\n- Fix ``#include`` not working if no ``-I`` parameters were supplied. Thanks to csm10495\n  for reporting this.\n\nv1.20 (7th January 2019):\n-------------------------\n- Now supports character literals in expressions. 
Thanks to untaugh for the pull request\n  adding this.\n- Stopped the default collapsing of whitespace in output, and made it optional via a\n  new command line option ``--compress``.\n- Fixed extraneous whitespace in ``--passthru-comments`` caused by multiline comments.\n  Thanks to p2k for reporting this.\n- Fixed bug where defining a macro via string did not set the source attribute in the\n  token. Thanks to ZedThree for reporting this.\n- Stop triggering an exception when no arguments are supplied to pcpp. Thanks to\n  virtuald for reporting this.\n- Rebase onto PLY latest from Dec 28th 2018 (https://github.com/dabeaz/ply/commit/a37e0839583d683d95e70ce1445c0063c7d4bd21). Latest\n  PLY no longer works using pypi packaging, David wants people to include the source of\n  PLY directly. pcpp does this via a git submodule, and has setuptools bundle the submodule.\n- Add a formal LICENSE.txt file, as requested by Sei-Lisa.\n- Fix failure to issue ``#line`` directive for first include file in a file. Thanks to\n  Sei-Lisa for reporting this.\n\nv1.1 (19th June 2018):\n----------------------\n- Added the ``--times`` and ``--filetimes`` features.\n- Fix bug where macros containing operator `defined` were not being expanded properly.\n- Added the ability to accept multiple inputs, they are concatenated into the output.\n- Fix bug where lines beginning with `#` and no contents caused an internal preprocessor error.\n- Fix bug where the macro expansion ``par par##ext`` was expanding into ``parext parext``.\n\nv1.01 (21st Feb 2018):\n----------------------\n- Fix bug where in pass through mode, an #elif in an #if block inside an #if block in ifpassthru was failing to be passed through.\n- Downgraded failure to evaluate an expression to a warning.\n- Fix missing Readme.rst in pypi package.\n\nv1.00 (13th Mar 2017):\n----------------------\nFirst release\n"
  },
  {
    "path": "doc/evaluator.html",
    "content": "<!doctype html>\n<html lang=\"en\">\n\n<head>\n    <meta charset=\"utf-8\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, minimum-scale=1\" />\n    <meta name=\"generator\" content=\"pdoc 0.5.3\" />\n    <title>pcpp.evaluator API documentation</title>\n    <meta name=\"description\" content=\"\" />\n    <link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>\n    <link href='https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/8.0.0/sanitize.min.css' rel='stylesheet'>\n    <link href=\"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/github.min.css\" rel=\"stylesheet\">\n    <style>\n        .flex {\n            display: flex !important\n        }\n\n        body {\n            line-height: 1.5em\n        }\n\n        #content {\n            padding: 20px\n        }\n\n        #sidebar {\n            padding: 30px;\n            overflow: hidden\n        }\n\n        .http-server-breadcrumbs {\n            font-size: 130%;\n            margin: 0 0 15px 0\n        }\n\n        #footer {\n            font-size: .75em;\n            padding: 5px 30px;\n            border-top: 1px solid #ddd;\n            text-align: right\n        }\n\n        #footer p {\n            margin: 0 0 0 1em;\n            display: inline-block\n        }\n\n        #footer p:last-child {\n            margin-right: 30px\n        }\n\n        h1,\n        h2,\n        h3,\n        h4,\n        h5 {\n            font-weight: 300\n        }\n\n        h1 {\n            font-size: 2.5em;\n            line-height: 1.1em\n        }\n\n        h2 {\n            font-size: 1.75em;\n            margin: 1em 0 .50em 0\n        }\n\n        h3 {\n            font-size: 1.4em;\n            margin: 25px 0 10px 0\n        }\n\n        h4 {\n            margin: 0;\n            font-size: 105%\n        }\n\n        a {\n            color: #058;\n            text-decoration: none;\n            
transition: color .3s ease-in-out\n        }\n\n        a:hover {\n            color: #e82\n        }\n\n        .title code {\n            font-weight: bold\n        }\n\n        h2[id^=\"header-\"] {\n            margin-top: 2em\n        }\n\n        .ident {\n            color: #900\n        }\n\n        pre code {\n            background: #f8f8f8;\n            font-size: .8em;\n            line-height: 1.4em\n        }\n\n        code {\n            background: #f2f2f1;\n            padding: 1px 4px;\n            overflow-wrap: break-word\n        }\n\n        h1 code {\n            background: transparent\n        }\n\n        pre {\n            background: #f8f8f8;\n            border: 0;\n            border-top: 1px solid #ccc;\n            border-bottom: 1px solid #ccc;\n            margin: 1em 0;\n            padding: 1ex\n        }\n\n        #http-server-module-list {\n            display: flex;\n            flex-flow: column\n        }\n\n        #http-server-module-list div {\n            display: flex\n        }\n\n        #http-server-module-list dt {\n            min-width: 10%\n        }\n\n        #http-server-module-list p {\n            margin-top: 0\n        }\n\n        .toc ul,\n        #index {\n            list-style-type: none;\n            margin: 0;\n            padding: 0\n        }\n\n        #index code {\n            background: transparent\n        }\n\n        #index h3 {\n            border-bottom: 1px solid #ddd\n        }\n\n        #index ul {\n            padding: 0\n        }\n\n        #index h4 {\n            font-weight: bold\n        }\n\n        #index h4+ul {\n            margin-bottom: .6em\n        }\n\n        @media (min-width:200ex) {\n            #index .two-column {\n                column-count: 2\n            }\n        }\n\n        @media (min-width:300ex) {\n            #index .two-column {\n                column-count: 3\n            }\n        }\n\n        dl {\n            margin-bottom: 2em\n        
}\n\n        dl dl:last-child {\n            margin-bottom: 4em\n        }\n\n        dd {\n            margin: 0 0 1em 3em\n        }\n\n        #header-classes+dl>dd {\n            margin-bottom: 3em\n        }\n\n        dd dd {\n            margin-left: 2em\n        }\n\n        dd p {\n            margin: 10px 0\n        }\n\n        .name {\n            background: #eee;\n            font-weight: bold;\n            font-size: .85em;\n            padding: 5px 10px;\n            display: inline-block;\n            min-width: 40%\n        }\n\n        .name:hover {\n            background: #e0e0e0\n        }\n\n        .name>span:first-child {\n            white-space: nowrap\n        }\n\n        .name.class>span:nth-child(2) {\n            margin-left: .4em\n        }\n\n        .name small {\n            font-weight: normal\n        }\n\n        .inherited {\n            color: #999;\n            border-left: 5px solid #eee;\n            padding-left: 1em\n        }\n\n        .inheritance em {\n            font-style: normal;\n            font-weight: bold\n        }\n\n        .desc h2 {\n            font-weight: 400;\n            font-size: 1.25em\n        }\n\n        .desc h3 {\n            font-size: 1em\n        }\n\n        .desc dt code {\n            background: inherit\n        }\n\n        .source summary {\n            color: #666;\n            text-align: right;\n            font-weight: 400;\n            font-size: .8em;\n            text-transform: uppercase;\n            cursor: pointer\n        }\n\n        .source pre {\n            max-height: 500px;\n            overflow: auto;\n            margin: 0\n        }\n\n        .source pre code {\n            font-size: 12px;\n            overflow: visible\n        }\n\n        .hlist {\n            list-style: none\n        }\n\n        .hlist li {\n            display: inline\n        }\n\n        .hlist li:after {\n            content: ',\\2002'\n        }\n\n        .hlist 
li:last-child:after {\n            content: none\n        }\n\n        .hlist .hlist {\n            display: inline;\n            padding-left: 1em\n        }\n\n        img {\n            max-width: 100%\n        }\n\n        .admonition {\n            padding: .1em .5em\n        }\n\n        .admonition-title {\n            font-weight: bold\n        }\n\n        .admonition.note,\n        .admonition.info,\n        .admonition.important {\n            background: #aef\n        }\n\n        .admonition.todo,\n        .admonition.versionadded,\n        .admonition.tip,\n        .admonition.hint {\n            background: #dfd\n        }\n\n        .admonition.warning,\n        .admonition.versionchanged,\n        .admonition.deprecated {\n            background: #fd4\n        }\n\n        .admonition.error,\n        .admonition.danger,\n        .admonition.caution {\n            background: lightpink\n        }\n    </style>\n    <style media=\"screen and (min-width: 700px)\">\n        @media screen and (min-width:700px) {\n            #sidebar {\n                width: 30%\n            }\n\n            #content {\n                width: 70%;\n                max-width: 100ch;\n                padding: 3em 4em;\n                border-left: 1px solid #ddd\n            }\n\n            pre code {\n                font-size: 1em\n            }\n\n            .item .name {\n                font-size: 1em\n            }\n\n            main {\n                display: flex;\n                flex-direction: row-reverse;\n                justify-content: flex-end\n            }\n\n            .toc ul ul,\n            #index ul {\n                padding-left: 1.5em\n            }\n\n            .toc>ul>li {\n                margin-top: .5em\n            }\n        }\n    </style>\n    <style media=\"print\">\n        @media print {\n            #sidebar h1 {\n                page-break-before: always\n            }\n\n            .source {\n                display: 
none\n            }\n        }\n\n        @media print {\n            * {\n                background: transparent !important;\n                color: #000 !important;\n                box-shadow: none !important;\n                text-shadow: none !important\n            }\n\n            a[href]:after {\n                content: \" (\" attr(href) \")\";\n                font-size: 90%\n            }\n\n            a[href][title]:after {\n                content: none\n            }\n\n            abbr[title]:after {\n                content: \" (\" attr(title) \")\"\n            }\n\n            .ir a:after,\n            a[href^=\"javascript:\"]:after,\n            a[href^=\"#\"]:after {\n                content: \"\"\n            }\n\n            pre,\n            blockquote {\n                border: 1px solid #999;\n                page-break-inside: avoid\n            }\n\n            thead {\n                display: table-header-group\n            }\n\n            tr,\n            img {\n                page-break-inside: avoid\n            }\n\n            img {\n                max-width: 100% !important\n            }\n\n            @page {\n                margin: 0.5cm\n            }\n\n            p,\n            h2,\n            h3 {\n                orphans: 3;\n                widows: 3\n            }\n\n            h1,\n            h2,\n            h3,\n            h4,\n            h5,\n            h6 {\n                page-break-after: avoid\n            }\n        }\n    </style>\n</head>\n\n<body>\n    <main>\n        <article id=\"content\">\n            <header>\n                <h1 class=\"title\"><code>pcpp.evaluator</code> module</h1>\n            </header>\n            <section id=\"section-intro\">\n                <details class=\"source\">\n                    <summary>Source code</summary>\n                    <pre><code class=\"python\">#!/usr/bin/python\n# Python C99 conforming preprocessor expression evaluator\n# (C) 2019-2026 
Niall Douglas http://www.nedproductions.biz/\n# Started: Apr 2019\n\nfrom __future__ import generators, print_function, absolute_import, division\n\nimport sys, os, re, codecs, copy\nif __name__ == &#39;__main__&#39; and __package__ is None:\n    sys.path.append( os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) ) )\nfrom pcpp.parser import STRING_TYPES, yacc, default_lexer, in_production\n\n# The width of signed integer which this evaluator will use\nINTMAXBITS = 64\n\n# Some Python 3 compatibility shims\nif sys.version_info.major &lt; 3:\n    INTBASETYPE = long\nelse:\n    INTBASETYPE = int\n\n# Precompile the regular expression for correctly expanding unicode escape\n# sequences in Python 2 and 3. See https://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python\n# for more information.\n_expand_escape_sequences_pat = re.compile(r&#39;&#39;&#39;\n    ( \\\\U........      # 8-digit hex escapes\n    | \\\\u....          # 4-digit hex escapes\n    | \\\\x..            # 2-digit hex escapes\n    | \\\\[0-7]{1,3}     # Octal escapes\n    | \\\\N\\{[^}]+\\}     # Unicode characters by name\n    | \\\\[\\\\&#39;&#34;abfnrtv]  # Single-character escapes\n)&#39;&#39;&#39;, re.UNICODE | re.VERBOSE)\n\nclass Value(INTBASETYPE):\n    &#34;&#34;&#34;A signed or unsigned integer within a preprocessor expression, bounded\n    to within INT_MIN and INT_MAX, or 0 and UINT_MAX. 
Signed overflow is handled\n    like a two&#39;s complement CPU, despite being UB, as that&#39;s what GCC and clang do.\n    \n    &gt;&gt;&gt; Value(5)\n    Value(5)\n    &gt;&gt;&gt; Value(&#39;5L&#39;)\n    Value(5)\n    &gt;&gt;&gt; Value(&#39;5U&#39;)\n    Value(5U)\n    &gt;&gt;&gt; Value(&#39;0&#39;)\n    Value(0)\n    &gt;&gt;&gt; Value(&#39;0U&#39;)\n    Value(0U)\n    &gt;&gt;&gt; Value(&#39;-1U&#39;)\n    Value(18446744073709551615U)\n    &gt;&gt;&gt; Value(5) * Value(2)\n    Value(10)\n    &gt;&gt;&gt; Value(5) + Value(&#39;2u&#39;)\n    Value(7U)\n    &gt;&gt;&gt; Value(5) * 2\n    Value(10)\n    &gt;&gt;&gt; Value(5) / 2   # Must return integer\n    Value(2)\n    &gt;&gt;&gt; Value(50) % 8\n    Value(2)\n    &gt;&gt;&gt; -Value(5)\n    Value(-5)\n    &gt;&gt;&gt; +Value(-5)\n    Value(-5)\n    &gt;&gt;&gt; ~Value(5)\n    Value(-6)\n    &gt;&gt;&gt; Value(6) &amp; 2\n    Value(2)\n    &gt;&gt;&gt; Value(4) | 2\n    Value(6)\n    &gt;&gt;&gt; Value(6) ^ 2\n    Value(4)\n    &gt;&gt;&gt; Value(2) &lt;&lt; 2\n    Value(8)\n    &gt;&gt;&gt; Value(8) &gt;&gt; 2\n    Value(2)\n    &gt;&gt;&gt; Value(9223372036854775808)\n    Value(-9223372036854775808)\n    &gt;&gt;&gt; Value(-9223372036854775809)\n    Value(9223372036854775807)\n    &gt;&gt;&gt; Value(18446744073709551615)\n    Value(-1)\n    &gt;&gt;&gt; Value(False)\n    Value(0)\n    &gt;&gt;&gt; Value(True)\n    Value(1)\n    &gt;&gt;&gt; Value(5) == Value(6)\n    Value(0)\n    &gt;&gt;&gt; Value(5) == Value(5)\n    Value(1)\n    &gt;&gt;&gt; not Value(2)\n    Traceback (most recent call last):\n    ...\n    AssertionError\n    &gt;&gt;&gt; Value(4) and Value(2)\n    Traceback (most recent call last):\n    ...\n    AssertionError\n    &gt;&gt;&gt; Value(5) and not Value(6)\n    Traceback (most recent call last):\n    ...\n    AssertionError\n    &gt;&gt;&gt; Value(&#39;0x3f&#39;)\n    Value(63)\n    &gt;&gt;&gt; Value(&#39;077&#39;)\n    Value(63)\n    &gt;&gt;&gt; Value(&#34;&#39;N&#39;&#34;)\n    
Value(78)\n    &gt;&gt;&gt; Value(&#34;L&#39;N&#39;&#34;)\n    Value(78)\n    &gt;&gt;&gt; Value(&#34;&#39;\\\\n&#39;&#34;)\n    Value(10)\n    &gt;&gt;&gt; Value(&#34;&#39;\\\\\\\\n&#39;&#34;)\n    Value(10)\n    &gt;&gt;&gt; Value(&#34;&#39;\\\\\\\\&#39;&#34;)\n    Value(92)\n    &gt;&gt;&gt; Value(&#34;&#39;\\\\&#39;&#34;)\n    Traceback (most recent call last):\n    ...\n    SyntaxError: Empty character escape sequence\n    &#34;&#34;&#34;\n    INT_MIN = -(1 &lt;&lt; (INTMAXBITS - 1))\n    INT_MAX = (1 &lt;&lt; (INTMAXBITS - 1)) - 1\n    INT_MASK = (1 &lt;&lt; INTMAXBITS) - 1\n    UINT_MIN = 0\n    UINT_MAX = (1 &lt;&lt; INTMAXBITS) - 1\n    @classmethod\n    def __sclamp(cls, value):\n        value = INTBASETYPE(value)\n        return ((value - cls.INT_MIN) &amp; cls.INT_MASK) + cls.INT_MIN\n    @classmethod\n    def __uclamp(cls, value):\n        value = INTBASETYPE(value)\n        return value &amp; cls.UINT_MAX\n    def __new__(cls, value, unsigned = False, exception = None):\n        if isinstance(value, Value):\n            unsigned = value.unsigned\n            exception = value.exception\n        elif isinstance(value, INTBASETYPE) or isinstance(value, int) or isinstance(value, float):\n            value = cls.__uclamp(value) if unsigned else cls.__sclamp(value)\n        elif isinstance(value, STRING_TYPES):\n            if (value.startswith(&#34;L&#39;&#34;) or value[0] == &#34;&#39;&#34;) and value[-1] == &#34;&#39;&#34;:\n                startidx = 2 if value.startswith(&#34;L&#39;&#34;) else 1\n                #print(&#34;1. ***&#34;, value, file = sys.stderr)\n                value = value[startidx:-1]\n                if len(value) == 0:\n                    raise SyntaxError(&#39;Empty character escape sequence&#39;)\n                #print(&#34;2. ***&#34;, value, file = sys.stderr)\n                value = _expand_escape_sequences_pat.sub(lambda x: codecs.decode(x.group(0), &#39;unicode-escape&#39;), value)\n                #print(&#34;3. 
***&#34;, value, file = sys.stderr)\n                x = INTBASETYPE(ord(value))\n                #print(&#34;4. ***&#34;, x, file = sys.stderr)\n            elif value.startswith(&#39;0x&#39;) or value.startswith(&#39;0X&#39;):\n                # Strip any terminators\n                while not ((value[-1] &gt;= &#39;0&#39; and value[-1] &lt;= &#39;9&#39;) or (value[-1] &gt;= &#39;a&#39; and value[-1] &lt;= &#39;f&#39;) or (value[-1] &gt;= &#39;A&#39; and value[-1] &lt;= &#39;F&#39;)):\n                    if value[-1] == &#39;u&#39; or value[-1] == &#39;U&#39;:\n                        unsigned = True\n                    value = value[:-1]\n                x = INTBASETYPE(value, base = 16)\n            elif value.startswith(&#39;0&#39;):\n                # Strip any terminators\n                while not (value[-1] &gt;= &#39;0&#39; and value[-1] &lt;= &#39;7&#39;):\n                    if value[-1] == &#39;u&#39; or value[-1] == &#39;U&#39;:\n                        unsigned = True\n                    value = value[:-1]\n                x = INTBASETYPE(value, base = 8)\n            else:\n                # Strip any terminators\n                while not (value[-1] &gt;= &#39;0&#39; and value[-1] &lt;= &#39;9&#39;):\n                    if value[-1] == &#39;u&#39; or value[-1] == &#39;U&#39;:\n                        unsigned = True\n                    value = value[:-1]\n                x = INTBASETYPE(value)\n            value = cls.__uclamp(x) if unsigned else cls.__sclamp(x)\n            #assert x == value\n        else:\n            print(&#39;Unknown value type: %s&#39; % repr(type(value)), file = sys.stderr)\n            assert False  # Input is an unrecognised type\n        inst = super(Value, cls).__new__(cls, value)\n        inst.unsigned = unsigned\n        inst.exception = exception\n        return inst\n    def value(self):\n        if self.exception is not None:\n            raise self.exception\n        return INTBASETYPE(self)\n    def 
__add__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) + self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__add__(other))\n    def __sub__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) - self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__sub__(other))\n    def __mul__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) * self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__mul__(other))\n    def __div__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) / self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__div__(other))\n    def __truediv__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) / self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__truediv__(other))\n    def __mod__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) % self.__uclamp(other), 
True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__mod__(other))\n    def __neg__(self):\n        if self.exception is not None:\n            return self\n        return Value(super(Value, self).__neg__(), self.unsigned)\n    def __invert__(self):\n        if self.exception is not None:\n            return self\n        return Value(super(Value, self).__invert__(), self.unsigned)\n    def __and__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) &amp; self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__and__(other))\n    def __or__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) | self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__or__(other))\n    def __pos__(self):\n        if self.exception is not None:\n            return self\n        return Value(super(Value, self).__pos__())\n    def __pow__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) ** self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__pow__(other))\n    def __lshift__(self, other):\n        if self.exception is not None:\n            return self\n        # Ignore other signedness\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) &lt;&lt; self.__uclamp(other), True) if (self.unsigned) else Value(super(Value, self).__lshift__(other))\n    def __rshift__(self, 
other):\n        if self.exception is not None:\n            return self\n        # Ignore other signedness\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) &gt;&gt; self.__uclamp(other), True) if (self.unsigned) else Value(super(Value, self).__rshift__(other))\n    def __xor__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) ^ self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__xor__(other))\n    def __repr__(self):\n        if self.exception is not None:\n            return &#34;Exception(%s)&#34; % repr(self.exception)\n        elif self.unsigned:\n            return &#34;Value(%dU)&#34; % INTBASETYPE(self)\n        else:\n            return &#34;Value(%d)&#34; % INTBASETYPE(self)\n    def __bool__(self):\n        assert False  # Do not use Python logical operations\n    def __nonzero__(self):\n        assert False  # Do not use Python logical operations\n    def __cmp__(self, other):\n        assert False\n    def __lt__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) &lt; self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(self.__sclamp(self) &lt; self.__sclamp(other), False)\n    def __le__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) &lt;= self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(self.__sclamp(self) &lt;= self.__sclamp(other), False)\n    def __eq__(self, other):\n  
      if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) == self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(self.__sclamp(self) == self.__sclamp(other), False)\n    def __ne__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) != self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(self.__sclamp(self) != self.__sclamp(other), False)\n    def __ge__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) &gt;= self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(self.__sclamp(self) &gt;= self.__sclamp(other), False)\n    def __gt__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) &gt; self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(self.__sclamp(self) &gt; self.__sclamp(other), False)\n\n        \n# PLY yacc specification\n# Valid C preprocessor expression items:\n#   - Integer constants\n#   - Character constants\n#   - Addition, subtraction, multiplication, division, bitwise and-or-xor, shifts,\n#     comparisons, logical and-or-not\n#   - defined()\n#\n# The C preprocessor does not support:\n#   - assignment\n#   - increment and decrement\n#   - array indexing, indirection\n#   - casting\n#   - sizeof, alignof\n\n# The subset of tokens from Preprocessor used in preprocessor expressions\ntokens = (\n   &#39;CPP_ID&#39;, 
&#39;CPP_INTEGER&#39;, &#39;CPP_CHAR&#39;, &#39;CPP_STRING&#39;,\n   &#39;CPP_PLUS&#39;, &#39;CPP_MINUS&#39;, &#39;CPP_STAR&#39;, &#39;CPP_FSLASH&#39;, &#39;CPP_PERCENT&#39;, &#39;CPP_BAR&#39;,\n   &#39;CPP_AMPERSAND&#39;, &#39;CPP_TILDE&#39;, &#39;CPP_HAT&#39;, &#39;CPP_LESS&#39;, &#39;CPP_GREATER&#39;, &#39;CPP_EXCLAMATION&#39;,\n   &#39;CPP_QUESTION&#39;, &#39;CPP_LPAREN&#39;, &#39;CPP_RPAREN&#39;,\n   &#39;CPP_COMMA&#39;, &#39;CPP_COLON&#39;,\n\n   &#39;CPP_LSHIFT&#39;, &#39;CPP_LESSEQUAL&#39;, &#39;CPP_RSHIFT&#39;,\n   &#39;CPP_GREATEREQUAL&#39;, &#39;CPP_LOGICALOR&#39;, &#39;CPP_LOGICALAND&#39;, &#39;CPP_EQUALITY&#39;,\n   &#39;CPP_INEQUALITY&#39;\n)\n# &#39;CPP_WS&#39;, &#39;CPP_EQUAL&#39;,  &#39;CPP_BSLASH&#39;, &#39;CPP_SQUOTE&#39;,\n\nprecedence = (\n    (&#39;left&#39;, &#39;CPP_COMMA&#39;),                                                     # 15\n                                                                               # 14 (assignments, unused)\n    (&#39;left&#39;, &#39;CPP_QUESTION&#39;, &#39;CPP_COLON&#39;),                                     # 13\n    (&#39;left&#39;, &#39;CPP_LOGICALOR&#39;),                                                 # 12\n    (&#39;left&#39;, &#39;CPP_LOGICALAND&#39;),                                                # 11\n    (&#39;left&#39;, &#39;CPP_BAR&#39;),                                                       # 10\n    (&#39;left&#39;, &#39;CPP_HAT&#39;),                                                       # 9\n    (&#39;left&#39;, &#39;CPP_AMPERSAND&#39;),                                                 # 8\n    (&#39;left&#39;, &#39;CPP_EQUALITY&#39;, &#39;CPP_INEQUALITY&#39;),                                # 7\n    (&#39;left&#39;, &#39;CPP_LESS&#39;, &#39;CPP_LESSEQUAL&#39;, &#39;CPP_GREATER&#39;, &#39;CPP_GREATEREQUAL&#39;),  # 6\n    (&#39;left&#39;, &#39;CPP_LSHIFT&#39;, &#39;CPP_RSHIFT&#39;),                                      # 5\n    (&#39;left&#39;, &#39;CPP_PLUS&#39;, &#39;CPP_MINUS&#39;),       
                                  # 4\n    (&#39;left&#39;, &#39;CPP_STAR&#39;, &#39;CPP_FSLASH&#39;, &#39;CPP_PERCENT&#39;),                         # 3\n    (&#39;right&#39;, &#39;UPLUS&#39;, &#39;UMINUS&#39;, &#39;CPP_EXCLAMATION&#39;, &#39;CPP_TILDE&#39;),              # 2\n                                                                               # 1 (unused in the C preprocessor)\n)\n\ndef p_error(p):\n    if p:\n        raise SyntaxError(&#34;around token &#39;%s&#39; type %s&#34; % (p.value, p.type))\n    else:\n        raise SyntaxError(&#34;at EOF&#34;)\n\ndef p_expression_number(p):\n    &#39;expression : CPP_INTEGER&#39;\n    p[0] = Value(p[1])\n\ndef p_expression_character(p):\n    &#39;expression : CPP_CHAR&#39;\n    p[0] = Value(p[1])\n\ndef p_expression_string(p):\n    &#34;&#34;&#34;\n    expression : CPP_STRING\n              | CPP_LESS expression CPP_GREATER\n    &#34;&#34;&#34;\n    p[0] = p[1]\n\ndef p_expression_group(t):\n    &#39;expression : CPP_LPAREN expression CPP_RPAREN&#39;\n    t[0] = t[2]\n\ndef p_expression_uplus(p):\n    &#39;expression : CPP_PLUS expression %prec UPLUS&#39;\n    p[0] = +Value(p[2])\n\ndef p_expression_uminus(p):\n    &#39;expression : CPP_MINUS expression %prec UMINUS&#39;\n    p[0] = -Value(p[2])\n\ndef p_expression_unop(p):\n    &#34;&#34;&#34;\n    expression : CPP_EXCLAMATION expression\n              | CPP_TILDE expression\n    &#34;&#34;&#34;\n    try:\n        if p[1] == &#39;!&#39;:\n            p[0] = Value(0) if (Value(p[2]).value()!=0) else Value(1)\n        elif p[1] == &#39;~&#39;:\n            p[0] = ~Value(p[2])\n    except Exception as e:\n        p[0] = Value(0, exception = e)\n\ndef p_expression_binop(p):\n    &#34;&#34;&#34;\n    expression : expression CPP_STAR expression\n              | expression CPP_FSLASH expression\n              | expression CPP_PERCENT expression\n              | expression CPP_PLUS expression\n              | expression CPP_MINUS expression\n              | 
expression CPP_LSHIFT expression\n              | expression CPP_RSHIFT expression\n              | expression CPP_LESS expression\n              | expression CPP_LESSEQUAL expression\n              | expression CPP_GREATER expression\n              | expression CPP_GREATEREQUAL expression\n              | expression CPP_EQUALITY expression\n              | expression CPP_INEQUALITY expression\n              | expression CPP_AMPERSAND expression\n              | expression CPP_HAT expression\n              | expression CPP_BAR expression\n              | expression CPP_LOGICALAND expression\n              | expression CPP_LOGICALOR expression\n              | expression CPP_COMMA expression\n    &#34;&#34;&#34;\n    # print [repr(p[i]) for i in range(0,4)]\n    try:\n        if p[2] == &#39;*&#39;:\n            p[0] = Value(p[1]) * Value(p[3])\n        elif p[2] == &#39;/&#39;:\n            p[0] = Value(p[1]) / Value(p[3])\n        elif p[2] == &#39;%&#39;:\n            p[0] = Value(p[1]) % Value(p[3])\n        elif p[2] == &#39;+&#39;:\n            p[0] = Value(p[1]) + Value(p[3])\n        elif p[2] == &#39;-&#39;:\n            p[0] = Value(p[1]) - Value(p[3])\n        elif p[2] == &#39;&lt;&lt;&#39;:\n            p[0] = Value(p[1]) &lt;&lt; Value(p[3])\n        elif p[2] == &#39;&gt;&gt;&#39;:\n            p[0] = Value(p[1]) &gt;&gt; Value(p[3])\n        elif p[2] == &#39;&lt;&#39;:\n            p[0] = Value(p[1]) &lt; Value(p[3])\n        elif p[2] == &#39;&lt;=&#39;:\n            p[0] = Value(p[1]) &lt;= Value(p[3])\n        elif p[2] == &#39;&gt;&#39;:\n            p[0] = Value(p[1]) &gt; Value(p[3])\n        elif p[2] == &#39;&gt;=&#39;:\n            p[0] = Value(p[1]) &gt;= Value(p[3])\n        elif p[2] == &#39;==&#39;:\n            p[0] = Value(p[1]) == Value(p[3])\n        elif p[2] == &#39;!=&#39;:\n            p[0] = Value(p[1]) != Value(p[3])\n        elif p[2] == &#39;&amp;&#39;:\n            p[0] = Value(p[1]) &amp; Value(p[3])\n        elif p[2] == 
&#39;^&#39;:\n            p[0] = Value(p[1]) ^ Value(p[3])\n        elif p[2] == &#39;|&#39;:\n            p[0] = Value(p[1]) | Value(p[3])\n        elif p[2] == &#39;&amp;&amp;&#39;:\n            p[0] = Value(1) if (Value(p[1]).value()!=0 and Value(p[3]).value()!=0) else Value(0)\n        elif p[2] == &#39;||&#39;:\n            p[0] = Value(1) if (Value(p[1]).value()!=0 or Value(p[3]).value()!=0) else Value(0)\n        elif p[2] == &#39;,&#39;:\n            p[0] = Value(p[3])\n    except Exception as e:\n        p[0] = Value(0, exception = e)\n\ndef p_expression_conditional(p):\n    &#39;expression : expression CPP_QUESTION expression CPP_COLON expression&#39;\n    try:\n        # Output type must cast up to unsigned if either input is unsigned\n        p[0] = Value(p[3]) if (Value(p[1]).value()!=0) else Value(p[5])\n        try:\n            p[0] = Value(p[0].value(), unsigned = Value(p[3]).unsigned or Value(p[5]).unsigned)\n        except:\n            pass\n    except Exception as e:\n        p[0] = Value(0, exception = e)\n\ndef p_expression_function_call(p):\n    &#34;expression : CPP_ID CPP_LPAREN expression CPP_RPAREN&#34;\n    try:\n        p.lexer.on_function_call(p)\n    except Exception as e:\n        p[0] = Value(0, exception = e)\n\ndef p_expression_identifier(p):\n    &#34;expression : CPP_ID&#34;\n    try:\n        p.lexer.on_identifier(p)\n    except Exception as e:\n        p[0] = Value(0, exception = e)\n\n\nclass Evaluator(object):\n    &#34;&#34;&#34;Evaluator of #if C preprocessor expressions.\n    \n    &gt;&gt;&gt; e = Evaluator()\n    &gt;&gt;&gt; e(&#39;5&#39;)\n    Value(5)\n    &gt;&gt;&gt; e(&#39;5+6&#39;)\n    Value(11)\n    &gt;&gt;&gt; e(&#39;5+6*2&#39;)\n    Value(17)\n    &gt;&gt;&gt; e(&#39;5/2+6*2&#39;)\n    Value(14)\n    &gt;&gt;&gt; e(&#39;5 &lt; 6 &lt;= 7&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;5 &lt; 6 &amp;&amp; 8 &gt; 7&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;18446744073709551615 == -1&#39;)\n    Value(1)\n    
&gt;&gt;&gt; e(&#39;-9223372036854775809 == 9223372036854775807&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;-1 &lt; 0U&#39;)\n    Value(0U)\n    &gt;&gt;&gt; e(&#39;(( 0L &amp;&amp; 0) || (!0L &amp;&amp; !0 ))&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(1)?2:3&#39;)\n    Value(2)\n    &gt;&gt;&gt; e(&#39;(1 ? -1 : 0) &lt;= 0&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(1 ? -1 : 0U)&#39;)       # Output type of ? must be common between both choices\n    Value(18446744073709551615U)\n    &gt;&gt;&gt; e(&#39;(1 ? -1 : 0U) &lt;= 0&#39;)\n    Value(0U)\n    &gt;&gt;&gt; e(&#39;1 &amp;&amp; 10 / 0&#39;)         # doctest: +ELLIPSIS\n    Exception(ZeroDivisionError(&#39;division by zero&#39;...\n    &gt;&gt;&gt; e(&#39;0 &amp;&amp; 10 / 0&#39;)         # &amp;&amp; must shortcut\n    Value(0)\n    &gt;&gt;&gt; e(&#39;1 ? 10 / 0 : 0&#39;)      # doctest: +ELLIPSIS\n    Exception(ZeroDivisionError(&#39;division by zero&#39;...\n    &gt;&gt;&gt; e(&#39;0 ? 10 / 0 : 0&#39;)      # ? must shortcut\n    Value(0)\n    &gt;&gt;&gt; e(&#39;(3 ^ 5) != 6 || (3 | 5) != 7 || (3 &amp; 5) != 1&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;1 &lt;&lt; 2 != 4 || 8 &gt;&gt; 1 != 4&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;(2 || 3) != 1 || (2 &amp;&amp; 3) != 1 || (0 || 4) != 1 || (0 &amp;&amp; 5) != 0&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;-1 &lt;&lt; 3U &gt; 0&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#34;&#39;N&#39; == 78&#34;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;0x3f == 63&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#34;&#39;\\\\\\\\n&#39;&#34;)\n    Value(10)\n    &gt;&gt;&gt; e(&#34;&#39;\\\\\\\\\\\\\\\\&#39;&#34;)\n    Value(92)\n    &gt;&gt;&gt; e(&#34;&#39;\\\\\\\\n&#39; == 0xA&#34;)\n    Value(1)\n    &gt;&gt;&gt; e(&#34;&#39;\\\\\\\\\\\\\\\\&#39; == 0x5c&#34;)\n    Value(1)\n    &gt;&gt;&gt; e(&#34;L&#39;\\\\\\\\0&#39; == 0&#34;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;12 == 12&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;12L == 12&#39;)\n    Value(1)\n    &gt;&gt;&gt; 
e(&#39;-1 &gt;= 0U&#39;)\n    Value(1U)\n    &gt;&gt;&gt; e(&#39;(1&lt;&lt;2) == 4&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(-!+!9) == -1&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(2 || 3) == 1&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;1L * 3 != 3&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;(!1L != 0) || (-1L != -1)&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;0177777 == 65535&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;0Xffff != 65535 || 0XFfFf == 65535&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;0L != 0 || 0l != 0&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;1U != 1 || 1u == 1&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;0 &lt;= -1&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;1 &lt;&lt; 2 != 4 || 8 &gt;&gt; 1 == 4&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(3 ^ 5) == 6&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(3 | 5) == 7&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(3 &amp; 5) == 1&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(3 ^ 5) != 6 || (3 | 5) != 7 || (3 &amp; 5) != 1&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;(0 ? 1 : 2) != 2&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;-1 &lt;&lt; 3U &gt; 0&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;0 &amp;&amp; 10 / 0&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;not_defined &amp;&amp; 10 / not_defined&#39;)  # doctest: +ELLIPSIS\n    Exception(SyntaxError(&#39;Unknown identifier not_defined&#39;...\n    &gt;&gt;&gt; e(&#39;0 &amp;&amp; 10 / 0 &gt; 1&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;(0) ? 
10 / 0 : 0&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;0 == 0 || 10 / 0 &gt; 1&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(15 &gt;&gt; 2 &gt;&gt; 1 != 1) || (3 &lt;&lt; 2 &lt;&lt; 1 != 24)&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;(1 | 2) == 3 &amp;&amp; 4 != 5 || 0&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;1  &gt;  0&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#34;&#39;\\123&#39; != 83&#34;)\n    Value(0)\n    &gt;&gt;&gt; e(&#34;&#39;\\x1b&#39; != &#39;\\033&#39;&#34;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;0 + (1 - (2 + (3 - (4 + (5 - (6 + (7 - (8 + (9 - (10 + (11 - (12 +          (13 - (14 + (15 - (16 + (17 - (18 + (19 - (20 + (21 - (22 + (23 -           (24 + (25 - (26 + (27 - (28 + (29 - (30 + (31 - (32 + 0))))))))))           )))))))))))))))))))))) == 0&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;test_function(X)&#39;, functions={&#39;test_function&#39;:lambda x: 55})\n    Value(55)\n    &gt;&gt;&gt; e(&#39;test_identifier&#39;, identifiers={&#39;test_identifier&#39;:11})\n    Value(11)\n    &gt;&gt;&gt; e(&#39;defined(X)&#39;, functions={&#39;defined&#39;:lambda x: 55})\n    Value(55)\n    &gt;&gt;&gt; e(&#39;defined(X)&#39;)  # doctest: +ELLIPSIS\n    Exception(SyntaxError(&#39;Unknown function defined&#39;...\n    &gt;&gt;&gt; e(&#39;__has_include(&#34;variant&#34;)&#39;)  # doctest: +ELLIPSIS\n    Exception(SyntaxError(&#39;Unknown function __has_include&#39;...\n    &gt;&gt;&gt; e(&#39;__has_include(&lt;variant&gt;)&#39;)  # doctest: +ELLIPSIS\n    Exception(SyntaxError(&#39;Unknown function __has_include&#39;...\n    &gt;&gt;&gt; e(&#39;5  // comment&#39;)\n    Value(5)\n    &gt;&gt;&gt; e(&#39;5  /* comment */&#39;)\n    Value(5)\n    &gt;&gt;&gt; e(&#39;5  /* comment // more */&#39;)\n    Value(5)\n    &gt;&gt;&gt; e(&#39;5  // /* comment */&#39;)\n    Value(5)\n    &#34;&#34;&#34;\n#    &gt;&gt;&gt; e(&#39;defined X&#39;, functions={&#39;defined&#39;:lambda x: 55})\n#    Value(55)\n\n    def __init__(self, lexer = None):\n        self.lexer = 
lexer if lexer is not None else default_lexer()\n        self.parser = yacc.yacc(optimize=in_production,debug=not in_production,write_tables=not in_production)\n\n    class __lexer(object):\n\n        def __init__(self, functions, identifiers):\n            self.__toks = []\n            self.__functions = functions\n            self.__identifiers = identifiers\n\n        def input(self, toks):\n            self.__toks = [tok for tok in toks if tok.type != &#39;CPP_WS&#39; and tok.type != &#39;CPP_LINECONT&#39; and tok.type != &#39;CPP_COMMENT1&#39; and tok.type != &#39;CPP_COMMENT2&#39;]\n            self.__idx = 0\n\n        def token(self):\n            if self.__idx &gt;= len(self.__toks):\n                return None\n            self.__idx = self.__idx + 1\n            return self.__toks[self.__idx - 1]\n\n        def on_function_call(self, p):\n            if p[1] not in self.__functions:\n                raise SyntaxError(&#39;Unknown function %s&#39; % p[1])\n            p[0] = Value(self.__functions[p[1]](p[3]))\n\n        def on_identifier(self, p):\n            if p[1] not in self.__identifiers:\n                raise SyntaxError(&#39;Unknown identifier %s&#39; % p[1])\n            p[0] = Value(self.__identifiers[p[1]])\n            \n    def __call__(self, input, functions = {}, identifiers = {}):\n        &#34;&#34;&#34;Execute a fully macro expanded set of tokens representing an expression,\n        returning the result of the evaluation.\n        &#34;&#34;&#34;\n        if not isinstance(input,list):\n            self.lexer.input(input)\n            input = []\n            while True:\n                tok = self.lexer.token()\n                if not tok:\n                    break\n                input.append(tok)\n        return self.parser.parse(input, lexer = self.__lexer(functions, identifiers))\n\n\nif __name__ == &#34;__main__&#34;:\n    import doctest\n    doctest.testmod()</code></pre>\n                </details>\n            </section>\n   
         <section>\n            </section>\n            <section>\n            </section>\n            <section>\n                <h2 class=\"section-title\" id=\"header-functions\">Functions</h2>\n                <dl>\n                    <dt id=\"pcpp.evaluator.p_error\"><code class=\"name flex\">\n<span>def <span class=\"ident\">p_error</span></span>(<span>p)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\"></section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def p_error(p):\n    if p:\n        raise SyntaxError(&#34;around token &#39;%s&#39; type %s&#34; % (p.value, p.type))\n    else:\n        raise SyntaxError(&#34;at EOF&#34;)</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.evaluator.p_expression_binop\"><code class=\"name flex\">\n<span>def <span class=\"ident\">p_expression_binop</span></span>(<span>p)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <dl>\n                                <dt><strong><code>expression</code></strong> :&ensp;<code>expression</code>\n                                    <code>CPP_STAR</code> <code>expression</code></dt>\n                                <dd>| expression CPP_FSLASH expression\n                                    | expression CPP_PERCENT expression\n                                    | expression CPP_PLUS expression\n                                    | expression CPP_MINUS expression\n                                    | expression CPP_LSHIFT expression\n                                    | expression CPP_RSHIFT expression\n                                    | expression CPP_LESS expression\n                                    | expression CPP_LESSEQUAL expression\n                                    
| expression CPP_GREATER expression\n                                    | expression CPP_GREATEREQUAL expression\n                                    | expression CPP_EQUALITY expression\n                                    | expression CPP_INEQUALITY expression\n                                    | expression CPP_AMPERSAND expression\n                                    | expression CPP_HAT expression\n                                    | expression CPP_BAR expression\n                                    | expression CPP_LOGICALAND expression\n                                    | expression CPP_LOGICALOR expression\n                                    | expression CPP_COMMA expression</dd>\n                            </dl>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def p_expression_binop(p):\n    &#34;&#34;&#34;\n    expression : expression CPP_STAR expression\n              | expression CPP_FSLASH expression\n              | expression CPP_PERCENT expression\n              | expression CPP_PLUS expression\n              | expression CPP_MINUS expression\n              | expression CPP_LSHIFT expression\n              | expression CPP_RSHIFT expression\n              | expression CPP_LESS expression\n              | expression CPP_LESSEQUAL expression\n              | expression CPP_GREATER expression\n              | expression CPP_GREATEREQUAL expression\n              | expression CPP_EQUALITY expression\n              | expression CPP_INEQUALITY expression\n              | expression CPP_AMPERSAND expression\n              | expression CPP_HAT expression\n              | expression CPP_BAR expression\n              | expression CPP_LOGICALAND expression\n              | expression CPP_LOGICALOR expression\n              | expression CPP_COMMA expression\n    &#34;&#34;&#34;\n    # print 
[repr(p[i]) for i in range(0,4)]\n    try:\n        if p[2] == &#39;*&#39;:\n            p[0] = Value(p[1]) * Value(p[3])\n        elif p[2] == &#39;/&#39;:\n            p[0] = Value(p[1]) / Value(p[3])\n        elif p[2] == &#39;%&#39;:\n            p[0] = Value(p[1]) % Value(p[3])\n        elif p[2] == &#39;+&#39;:\n            p[0] = Value(p[1]) + Value(p[3])\n        elif p[2] == &#39;-&#39;:\n            p[0] = Value(p[1]) - Value(p[3])\n        elif p[2] == &#39;&lt;&lt;&#39;:\n            p[0] = Value(p[1]) &lt;&lt; Value(p[3])\n        elif p[2] == &#39;&gt;&gt;&#39;:\n            p[0] = Value(p[1]) &gt;&gt; Value(p[3])\n        elif p[2] == &#39;&lt;&#39;:\n            p[0] = Value(p[1]) &lt; Value(p[3])\n        elif p[2] == &#39;&lt;=&#39;:\n            p[0] = Value(p[1]) &lt;= Value(p[3])\n        elif p[2] == &#39;&gt;&#39;:\n            p[0] = Value(p[1]) &gt; Value(p[3])\n        elif p[2] == &#39;&gt;=&#39;:\n            p[0] = Value(p[1]) &gt;= Value(p[3])\n        elif p[2] == &#39;==&#39;:\n            p[0] = Value(p[1]) == Value(p[3])\n        elif p[2] == &#39;!=&#39;:\n            p[0] = Value(p[1]) != Value(p[3])\n        elif p[2] == &#39;&amp;&#39;:\n            p[0] = Value(p[1]) &amp; Value(p[3])\n        elif p[2] == &#39;^&#39;:\n            p[0] = Value(p[1]) ^ Value(p[3])\n        elif p[2] == &#39;|&#39;:\n            p[0] = Value(p[1]) | Value(p[3])\n        elif p[2] == &#39;&amp;&amp;&#39;:\n            p[0] = Value(1) if (Value(p[1]).value()!=0 and Value(p[3]).value()!=0) else Value(0)\n        elif p[2] == &#39;||&#39;:\n            p[0] = Value(1) if (Value(p[1]).value()!=0 or Value(p[3]).value()!=0) else Value(0)\n        elif p[2] == &#39;,&#39;:\n            p[0] = Value(p[3])\n    except Exception as e:\n        p[0] = Value(0, exception = e)</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.evaluator.p_expression_character\"><code class=\"name flex\">\n<span>def 
<span class=\"ident\">p_expression_character</span></span>(<span>p)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <dl>\n                                <dt><strong><code>expression</code></strong> :&ensp;<code>CPP_CHAR</code></dt>\n                                <dd>&nbsp;</dd>\n                            </dl>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def p_expression_character(p):\n    &#39;expression : CPP_CHAR&#39;\n    p[0] = Value(p[1])</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.evaluator.p_expression_conditional\"><code class=\"name flex\">\n<span>def <span class=\"ident\">p_expression_conditional</span></span>(<span>p)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <dl>\n                                <dt><strong><code>expression</code></strong> :&ensp;<code>expression</code>\n                                    <code>CPP_QUESTION</code> <code>expression</code> <code>CPP_COLON</code>\n                                    <code>expression</code></dt>\n                                <dd>&nbsp;</dd>\n                            </dl>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def p_expression_conditional(p):\n    &#39;expression : expression CPP_QUESTION expression CPP_COLON expression&#39;\n    try:\n        # Output type must cast up to unsigned if either input is unsigned\n        p[0] = Value(p[3]) if (Value(p[1]).value()!=0) else Value(p[5])\n        try:\n            p[0] = Value(p[0].value(), unsigned = 
Value(p[3]).unsigned or Value(p[5]).unsigned)\n        except:\n            pass\n    except Exception as e:\n        p[0] = Value(0, exception = e)</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.evaluator.p_expression_function_call\"><code class=\"name flex\">\n<span>def <span class=\"ident\">p_expression_function_call</span></span>(<span>p)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <dl>\n                                <dt><strong><code>expression</code></strong> :&ensp;<code>CPP_ID</code>\n                                    <code>CPP_LPAREN</code> <code>expression</code> <code>CPP_RPAREN</code></dt>\n                                <dd>&nbsp;</dd>\n                            </dl>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def p_expression_function_call(p):\n    &#34;expression : CPP_ID CPP_LPAREN expression CPP_RPAREN&#34;\n    try:\n        p.lexer.on_function_call(p)\n    except Exception as e:\n        p[0] = Value(0, exception = e)</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.evaluator.p_expression_group\"><code class=\"name flex\">\n<span>def <span class=\"ident\">p_expression_group</span></span>(<span>t)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <dl>\n                                <dt><strong><code>expression</code></strong> :&ensp;<code>CPP_LPAREN</code>\n                                    <code>expression</code> <code>CPP_RPAREN</code></dt>\n                                <dd>&nbsp;</dd>\n                            </dl>\n                        </section>\n                        <details 
class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def p_expression_group(t):\n    &#39;expression : CPP_LPAREN expression CPP_RPAREN&#39;\n    t[0] = t[2]</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.evaluator.p_expression_identifier\"><code class=\"name flex\">\n<span>def <span class=\"ident\">p_expression_identifier</span></span>(<span>p)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <dl>\n                                <dt><strong><code>expression</code></strong> :&ensp;<code>CPP_ID</code></dt>\n                                <dd>&nbsp;</dd>\n                            </dl>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def p_expression_identifier(p):\n    &#34;expression : CPP_ID&#34;\n    try:\n        p.lexer.on_identifier(p)\n    except Exception as e:\n        p[0] = Value(0, exception = e)</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.evaluator.p_expression_number\"><code class=\"name flex\">\n<span>def <span class=\"ident\">p_expression_number</span></span>(<span>p)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <dl>\n                                <dt><strong><code>expression</code></strong> :&ensp;<code>CPP_INTEGER</code></dt>\n                                <dd>&nbsp;</dd>\n                            </dl>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def 
p_expression_number(p):\n    &#39;expression : CPP_INTEGER&#39;\n    p[0] = Value(p[1])</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.evaluator.p_expression_string\"><code class=\"name flex\">\n<span>def <span class=\"ident\">p_expression_string</span></span>(<span>p)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <dl>\n                                <dt><strong><code>expression</code></strong> :&ensp;<code>CPP_STRING</code></dt>\n                                <dd>| CPP_LESS expression CPP_GREATER</dd>\n                            </dl>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def p_expression_string(p):\n    &#34;&#34;&#34;\n    expression : CPP_STRING\n              | CPP_LESS expression CPP_GREATER\n    &#34;&#34;&#34;\n    p[0] = p[1]</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.evaluator.p_expression_uminus\"><code class=\"name flex\">\n<span>def <span class=\"ident\">p_expression_uminus</span></span>(<span>p)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <dl>\n                                <dt><strong><code>expression</code></strong> :&ensp;<code>CPP_MINUS</code>\n                                    <code>expression</code> %<code>prec</code> <code>UMINUS</code></dt>\n                                <dd>&nbsp;</dd>\n                            </dl>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def p_expression_uminus(p):\n    &#39;expression : CPP_MINUS 
expression %prec UMINUS&#39;\n    p[0] = -Value(p[2])</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.evaluator.p_expression_unop\"><code class=\"name flex\">\n<span>def <span class=\"ident\">p_expression_unop</span></span>(<span>p)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <dl>\n                                <dt><strong><code>expression</code></strong> :&ensp;<code>CPP_EXCLAMATION</code>\n                                    <code>expression</code></dt>\n                                <dd>| CPP_TILDE expression</dd>\n                            </dl>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def p_expression_unop(p):\n    &#34;&#34;&#34;\n    expression : CPP_EXCLAMATION expression\n              | CPP_TILDE expression\n    &#34;&#34;&#34;\n    try:\n        if p[1] == &#39;!&#39;:\n            p[0] = Value(0) if (Value(p[2]).value()!=0) else Value(1)\n        elif p[1] == &#39;~&#39;:\n            p[0] = ~Value(p[2])\n    except Exception as e:\n        p[0] = Value(0, exception = e)</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.evaluator.p_expression_uplus\"><code class=\"name flex\">\n<span>def <span class=\"ident\">p_expression_uplus</span></span>(<span>p)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <dl>\n                                <dt><strong><code>expression</code></strong> :&ensp;<code>CPP_PLUS</code>\n                                    <code>expression</code> %<code>prec</code> <code>UPLUS</code></dt>\n                                <dd>&nbsp;</dd>\n                            </dl>\n                
        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def p_expression_uplus(p):\n    &#39;expression : CPP_PLUS expression %prec UPLUS&#39;\n    p[0] = +Value(p[2])</code></pre>\n                        </details>\n                    </dd>\n                </dl>\n            </section>\n            <section>\n                <h2 class=\"section-title\" id=\"header-classes\">Classes</h2>\n                <dl>\n                    <dt id=\"pcpp.evaluator.Evaluator\"><code class=\"flex name class\">\n<span>class <span class=\"ident\">Evaluator</span></span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <p>Evaluator of #if C preprocessor expressions.</p>\n                            <pre><code>&gt;&gt;&gt; e = Evaluator()\n&gt;&gt;&gt; e('5')\nValue(5)\n&gt;&gt;&gt; e('5+6')\nValue(11)\n&gt;&gt;&gt; e('5+6*2')\nValue(17)\n&gt;&gt;&gt; e('5/2+6*2')\nValue(14)\n&gt;&gt;&gt; e('5 &lt; 6 &lt;= 7')\nValue(1)\n&gt;&gt;&gt; e('5 &lt; 6 &amp;&amp; 8 &gt; 7')\nValue(1)\n&gt;&gt;&gt; e('18446744073709551615 == -1')\nValue(1)\n&gt;&gt;&gt; e('-9223372036854775809 == 9223372036854775807')\nValue(1)\n&gt;&gt;&gt; e('-1 &lt; 0U')\nValue(0U)\n&gt;&gt;&gt; e('(( 0L &amp;&amp; 0) || (!0L &amp;&amp; !0 ))')\nValue(1)\n&gt;&gt;&gt; e('(1)?2:3')\nValue(2)\n&gt;&gt;&gt; e('(1 ? -1 : 0) &lt;= 0')\nValue(1)\n&gt;&gt;&gt; e('(1 ? -1 : 0U)')       # Output type of ? must be common between both choices\nValue(18446744073709551615U)\n&gt;&gt;&gt; e('(1 ? -1 : 0U) &lt;= 0')\nValue(0U)\n&gt;&gt;&gt; e('1 &amp;&amp; 10 / 0')         # doctest: +ELLIPSIS\nException(ZeroDivisionError('division by zero'...\n&gt;&gt;&gt; e('0 &amp;&amp; 10 / 0')         # &amp;&amp; must shortcut\nValue(0)\n&gt;&gt;&gt; e('1 ? 
10 / 0 : 0')      # doctest: +ELLIPSIS\nException(ZeroDivisionError('division by zero'...\n&gt;&gt;&gt; e('0 ? 10 / 0 : 0')      # ? must shortcut\nValue(0)\n&gt;&gt;&gt; e('(3 ^ 5) != 6 || (3 | 5) != 7 || (3 &amp; 5) != 1')\nValue(0)\n&gt;&gt;&gt; e('1 &lt;&lt; 2 != 4 || 8 &gt;&gt; 1 != 4')\nValue(0)\n&gt;&gt;&gt; e('(2 || 3) != 1 || (2 &amp;&amp; 3) != 1 || (0 || 4) != 1 || (0 &amp;&amp; 5) != 0')\nValue(0)\n&gt;&gt;&gt; e('-1 &lt;&lt; 3U &gt; 0')\nValue(0)\n&gt;&gt;&gt; e(\"'N' == 78\")\nValue(1)\n&gt;&gt;&gt; e('0x3f == 63')\nValue(1)\n&gt;&gt;&gt; e(\"'\\\\n'\")\nValue(10)\n&gt;&gt;&gt; e(\"'\\\\\\\\'\")\nValue(92)\n&gt;&gt;&gt; e(\"'\\\\n' == 0xA\")\nValue(1)\n&gt;&gt;&gt; e(\"'\\\\\\\\' == 0x5c\")\nValue(1)\n&gt;&gt;&gt; e(\"L'\\\\0' == 0\")\nValue(1)\n&gt;&gt;&gt; e('12 == 12')\nValue(1)\n&gt;&gt;&gt; e('12L == 12')\nValue(1)\n&gt;&gt;&gt; e('-1 &gt;= 0U')\nValue(1U)\n&gt;&gt;&gt; e('(1&lt;&lt;2) == 4')\nValue(1)\n&gt;&gt;&gt; e('(-!+!9) == -1')\nValue(1)\n&gt;&gt;&gt; e('(2 || 3) == 1')\nValue(1)\n&gt;&gt;&gt; e('1L * 3 != 3')\nValue(0)\n&gt;&gt;&gt; e('(!1L != 0) || (-1L != -1)')\nValue(0)\n&gt;&gt;&gt; e('0177777 == 65535')\nValue(1)\n&gt;&gt;&gt; e('0Xffff != 65535 || 0XFfFf == 65535')\nValue(1)\n&gt;&gt;&gt; e('0L != 0 || 0l != 0')\nValue(0)\n&gt;&gt;&gt; e('1U != 1 || 1u == 1')\nValue(1)\n&gt;&gt;&gt; e('0 &lt;= -1')\nValue(0)\n&gt;&gt;&gt; e('1 &lt;&lt; 2 != 4 || 8 &gt;&gt; 1 == 4')\nValue(1)\n&gt;&gt;&gt; e('(3 ^ 5) == 6')\nValue(1)\n&gt;&gt;&gt; e('(3 | 5) == 7')\nValue(1)\n&gt;&gt;&gt; e('(3 &amp; 5) == 1')\nValue(1)\n&gt;&gt;&gt; e('(3 ^ 5) != 6 || (3 | 5) != 7 || (3 &amp; 5) != 1')\nValue(0)\n&gt;&gt;&gt; e('(0 ? 
1 : 2) != 2')\nValue(0)\n&gt;&gt;&gt; e('-1 &lt;&lt; 3U &gt; 0')\nValue(0)\n&gt;&gt;&gt; e('0 &amp;&amp; 10 / 0')\nValue(0)\n&gt;&gt;&gt; e('not_defined &amp;&amp; 10 / not_defined')  # doctest: +ELLIPSIS\nException(SyntaxError('Unknown identifier not_defined'...\n&gt;&gt;&gt; e('0 &amp;&amp; 10 / 0 &gt; 1')\nValue(0)\n&gt;&gt;&gt; e('(0) ? 10 / 0 : 0')\nValue(0)\n&gt;&gt;&gt; e('0 == 0 || 10 / 0 &gt; 1')\nValue(1)\n&gt;&gt;&gt; e('(15 &gt;&gt; 2 &gt;&gt; 1 != 1) || (3 &lt;&lt; 2 &lt;&lt; 1 != 24)')\nValue(0)\n&gt;&gt;&gt; e('(1 | 2) == 3 &amp;&amp; 4 != 5 || 0')\nValue(1)\n&gt;&gt;&gt; e('1  &gt;  0')\nValue(1)\n&gt;&gt;&gt; e(\"'S' != 83\")\nValue(0)\n&gt;&gt;&gt; e(\"'\u001b' != '\u001b'\")\nValue(0)\n&gt;&gt;&gt; e('0 + (1 - (2 + (3 - (4 + (5 - (6 + (7 - (8 + (9 - (10 + (11 - (12 +          (13 - (14 + (15 - (16 + (17 - (18 + (19 - (20 + (21 - (22 + (23 -           (24 + (25 - (26 + (27 - (28 + (29 - (30 + (31 - (32 + 0))))))))))           )))))))))))))))))))))) == 0')\nValue(1)\n&gt;&gt;&gt; e('test_function(X)', functions={'test_function':lambda x: 55})\nValue(55)\n&gt;&gt;&gt; e('test_identifier', identifiers={'test_identifier':11})\nValue(11)\n&gt;&gt;&gt; e('defined(X)', functions={'defined':lambda x: 55})\nValue(55)\n&gt;&gt;&gt; e('defined(X)')  # doctest: +ELLIPSIS\nException(SyntaxError('Unknown function defined'...\n&gt;&gt;&gt; e('__has_include(\"variant\")')  # doctest: +ELLIPSIS\nException(SyntaxError('Unknown function __has_include'...\n&gt;&gt;&gt; e('__has_include(&lt;variant&gt;)')  # doctest: +ELLIPSIS\nException(SyntaxError('Unknown function __has_include'...\n&gt;&gt;&gt; e('5  // comment')\nValue(5)\n&gt;&gt;&gt; e('5  /* comment */')\nValue(5)\n&gt;&gt;&gt; e('5  /* comment // more */')\nValue(5)\n&gt;&gt;&gt; e('5  // /* comment */')\n</code></pre>\n                            <p>Value(5)</p>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source 
code</summary>\n                            <pre><code class=\"python\">class Evaluator(object):\n    &#34;&#34;&#34;Evaluator of #if C preprocessor expressions.\n    \n    &gt;&gt;&gt; e = Evaluator()\n    &gt;&gt;&gt; e(&#39;5&#39;)\n    Value(5)\n    &gt;&gt;&gt; e(&#39;5+6&#39;)\n    Value(11)\n    &gt;&gt;&gt; e(&#39;5+6*2&#39;)\n    Value(17)\n    &gt;&gt;&gt; e(&#39;5/2+6*2&#39;)\n    Value(14)\n    &gt;&gt;&gt; e(&#39;5 &lt; 6 &lt;= 7&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;5 &lt; 6 &amp;&amp; 8 &gt; 7&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;18446744073709551615 == -1&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;-9223372036854775809 == 9223372036854775807&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;-1 &lt; 0U&#39;)\n    Value(0U)\n    &gt;&gt;&gt; e(&#39;(( 0L &amp;&amp; 0) || (!0L &amp;&amp; !0 ))&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(1)?2:3&#39;)\n    Value(2)\n    &gt;&gt;&gt; e(&#39;(1 ? -1 : 0) &lt;= 0&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(1 ? -1 : 0U)&#39;)       # Output type of ? must be common between both choices\n    Value(18446744073709551615U)\n    &gt;&gt;&gt; e(&#39;(1 ? -1 : 0U) &lt;= 0&#39;)\n    Value(0U)\n    &gt;&gt;&gt; e(&#39;1 &amp;&amp; 10 / 0&#39;)         # doctest: +ELLIPSIS\n    Exception(ZeroDivisionError(&#39;division by zero&#39;...\n    &gt;&gt;&gt; e(&#39;0 &amp;&amp; 10 / 0&#39;)         # &amp;&amp; must shortcut\n    Value(0)\n    &gt;&gt;&gt; e(&#39;1 ? 10 / 0 : 0&#39;)      # doctest: +ELLIPSIS\n    Exception(ZeroDivisionError(&#39;division by zero&#39;...\n    &gt;&gt;&gt; e(&#39;0 ? 10 / 0 : 0&#39;)      # ? 
must shortcut\n    Value(0)\n    &gt;&gt;&gt; e(&#39;(3 ^ 5) != 6 || (3 | 5) != 7 || (3 &amp; 5) != 1&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;1 &lt;&lt; 2 != 4 || 8 &gt;&gt; 1 != 4&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;(2 || 3) != 1 || (2 &amp;&amp; 3) != 1 || (0 || 4) != 1 || (0 &amp;&amp; 5) != 0&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;-1 &lt;&lt; 3U &gt; 0&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#34;&#39;N&#39; == 78&#34;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;0x3f == 63&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#34;&#39;\\\\\\\\n&#39;&#34;)\n    Value(10)\n    &gt;&gt;&gt; e(&#34;&#39;\\\\\\\\\\\\\\\\&#39;&#34;)\n    Value(92)\n    &gt;&gt;&gt; e(&#34;&#39;\\\\\\\\n&#39; == 0xA&#34;)\n    Value(1)\n    &gt;&gt;&gt; e(&#34;&#39;\\\\\\\\\\\\\\\\&#39; == 0x5c&#34;)\n    Value(1)\n    &gt;&gt;&gt; e(&#34;L&#39;\\\\\\\\0&#39; == 0&#34;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;12 == 12&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;12L == 12&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;-1 &gt;= 0U&#39;)\n    Value(1U)\n    &gt;&gt;&gt; e(&#39;(1&lt;&lt;2) == 4&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(-!+!9) == -1&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(2 || 3) == 1&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;1L * 3 != 3&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;(!1L != 0) || (-1L != -1)&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;0177777 == 65535&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;0Xffff != 65535 || 0XFfFf == 65535&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;0L != 0 || 0l != 0&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;1U != 1 || 1u == 1&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;0 &lt;= -1&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;1 &lt;&lt; 2 != 4 || 8 &gt;&gt; 1 == 4&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(3 ^ 5) == 6&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(3 | 5) == 7&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(3 &amp; 5) == 1&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(3 ^ 5) != 6 || (3 | 5) != 7 
|| (3 &amp; 5) != 1&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;(0 ? 1 : 2) != 2&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;-1 &lt;&lt; 3U &gt; 0&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;0 &amp;&amp; 10 / 0&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;not_defined &amp;&amp; 10 / not_defined&#39;)  # doctest: +ELLIPSIS\n    Exception(SyntaxError(&#39;Unknown identifier not_defined&#39;...\n    &gt;&gt;&gt; e(&#39;0 &amp;&amp; 10 / 0 &gt; 1&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;(0) ? 10 / 0 : 0&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;0 == 0 || 10 / 0 &gt; 1&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(15 &gt;&gt; 2 &gt;&gt; 1 != 1) || (3 &lt;&lt; 2 &lt;&lt; 1 != 24)&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;(1 | 2) == 3 &amp;&amp; 4 != 5 || 0&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;1  &gt;  0&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#34;&#39;\\123&#39; != 83&#34;)\n    Value(0)\n    &gt;&gt;&gt; e(&#34;&#39;\\x1b&#39; != &#39;\\033&#39;&#34;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;0 + (1 - (2 + (3 - (4 + (5 - (6 + (7 - (8 + (9 - (10 + (11 - (12 +          (13 - (14 + (15 - (16 + (17 - (18 + (19 - (20 + (21 - (22 + (23 -           (24 + (25 - (26 + (27 - (28 + (29 - (30 + (31 - (32 + 0))))))))))           )))))))))))))))))))))) == 0&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;test_function(X)&#39;, functions={&#39;test_function&#39;:lambda x: 55})\n    Value(55)\n    &gt;&gt;&gt; e(&#39;test_identifier&#39;, identifiers={&#39;test_identifier&#39;:11})\n    Value(11)\n    &gt;&gt;&gt; e(&#39;defined(X)&#39;, functions={&#39;defined&#39;:lambda x: 55})\n    Value(55)\n    &gt;&gt;&gt; e(&#39;defined(X)&#39;)  # doctest: +ELLIPSIS\n    Exception(SyntaxError(&#39;Unknown function defined&#39;...\n    &gt;&gt;&gt; e(&#39;__has_include(&#34;variant&#34;)&#39;)  # doctest: +ELLIPSIS\n    Exception(SyntaxError(&#39;Unknown function __has_include&#39;...\n    &gt;&gt;&gt; e(&#39;__has_include(&lt;variant&gt;)&#39;)  # doctest: +ELLIPSIS\n    
Exception(SyntaxError(&#39;Unknown function __has_include&#39;...\n    &gt;&gt;&gt; e(&#39;5  // comment&#39;)\n    Value(5)\n    &gt;&gt;&gt; e(&#39;5  /* comment */&#39;)\n    Value(5)\n    &gt;&gt;&gt; e(&#39;5  /* comment // more */&#39;)\n    Value(5)\n    &gt;&gt;&gt; e(&#39;5  // /* comment */&#39;)\n    Value(5)\n    &#34;&#34;&#34;\n#    &gt;&gt;&gt; e(&#39;defined X&#39;, functions={&#39;defined&#39;:lambda x: 55})\n#    Value(55)\n\n    def __init__(self, lexer = None):\n        self.lexer = lexer if lexer is not None else default_lexer()\n        self.parser = yacc.yacc(optimize=in_production,debug=not in_production,write_tables=not in_production)\n\n    class __lexer(object):\n\n        def __init__(self, functions, identifiers):\n            self.__toks = []\n            self.__functions = functions\n            self.__identifiers = identifiers\n\n        def input(self, toks):\n            self.__toks = [tok for tok in toks if tok.type != &#39;CPP_WS&#39; and tok.type != &#39;CPP_LINECONT&#39; and tok.type != &#39;CPP_COMMENT1&#39; and tok.type != &#39;CPP_COMMENT2&#39;]\n            self.__idx = 0\n\n        def token(self):\n            if self.__idx &gt;= len(self.__toks):\n                return None\n            self.__idx = self.__idx + 1\n            return self.__toks[self.__idx - 1]\n\n        def on_function_call(self, p):\n            if p[1] not in self.__functions:\n                raise SyntaxError(&#39;Unknown function %s&#39; % p[1])\n            p[0] = Value(self.__functions[p[1]](p[3]))\n\n        def on_identifier(self, p):\n            if p[1] not in self.__identifiers:\n                raise SyntaxError(&#39;Unknown identifier %s&#39; % p[1])\n            p[0] = Value(self.__identifiers[p[1]])\n            \n    def __call__(self, input, functions = {}, identifiers = {}):\n        &#34;&#34;&#34;Execute a fully macro expanded set of tokens representing an expression,\n        returning the result of the evaluation.\n        
&#34;&#34;&#34;\n        if not isinstance(input,list):\n            self.lexer.input(input)\n            input = []\n            while True:\n                tok = self.lexer.token()\n                if not tok:\n                    break\n                input.append(tok)\n        return self.parser.parse(input, lexer = self.__lexer(functions, identifiers))</code></pre>\n                        </details>\n                        <h3>Methods</h3>\n                        <dl>\n                            <dt id=\"pcpp.evaluator.Evaluator.__init__\"><code class=\"name flex\">\n<span>def <span class=\"ident\">__init__</span></span>(<span>self, lexer=None)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Initialize self.\n                                        See help(type(self)) for accurate signature.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def __init__(self, lexer = None):\n    self.lexer = lexer if lexer is not None else default_lexer()\n    self.parser = yacc.yacc(optimize=in_production,debug=not in_production,write_tables=not in_production)</code></pre>\n                                </details>\n                            </dd>\n                        </dl>\n                    </dd>\n                    <dt id=\"pcpp.evaluator.Value\"><code class=\"flex name class\">\n<span>class <span class=\"ident\">Value</span></span>\n<span>(</span><span><small>ancestors:</small> builtins.int)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <p>A signed or unsigned integer within a preprocessor expression, bounded\n                                to within INT_MIN and INT_MAX, or 0 and 
UINT_MAX. Signed overflow is handled\n                                like a two's complement CPU, despite being UB, as that's what GCC and clang do.</p>\n                            <pre><code>&gt;&gt;&gt; Value(5)\nValue(5)\n&gt;&gt;&gt; Value('5L')\nValue(5)\n&gt;&gt;&gt; Value('5U')\nValue(5U)\n&gt;&gt;&gt; Value('0')\nValue(0)\n&gt;&gt;&gt; Value('0U')\nValue(0U)\n&gt;&gt;&gt; Value('-1U')\nValue(18446744073709551615U)\n&gt;&gt;&gt; Value(5) * Value(2)\nValue(10)\n&gt;&gt;&gt; Value(5) + Value('2u')\nValue(7U)\n&gt;&gt;&gt; Value(5) * 2\nValue(10)\n&gt;&gt;&gt; Value(5) / 2   # Must return integer\nValue(2)\n&gt;&gt;&gt; Value(50) % 8\nValue(2)\n&gt;&gt;&gt; -Value(5)\nValue(-5)\n&gt;&gt;&gt; +Value(-5)\nValue(-5)\n&gt;&gt;&gt; ~Value(5)\nValue(-6)\n&gt;&gt;&gt; Value(6) &amp; 2\nValue(2)\n&gt;&gt;&gt; Value(4) | 2\nValue(6)\n&gt;&gt;&gt; Value(6) ^ 2\nValue(4)\n&gt;&gt;&gt; Value(2) &lt;&lt; 2\nValue(8)\n&gt;&gt;&gt; Value(8) &gt;&gt; 2\nValue(2)\n&gt;&gt;&gt; Value(9223372036854775808)\nValue(-9223372036854775808)\n&gt;&gt;&gt; Value(-9223372036854775809)\nValue(9223372036854775807)\n&gt;&gt;&gt; Value(18446744073709551615)\nValue(-1)\n&gt;&gt;&gt; Value(False)\nValue(0)\n&gt;&gt;&gt; Value(True)\nValue(1)\n&gt;&gt;&gt; Value(5) == Value(6)\nValue(0)\n&gt;&gt;&gt; Value(5) == Value(5)\nValue(1)\n&gt;&gt;&gt; not Value(2)\nTraceback (most recent call last):\n</code></pre>\n                            <p>&hellip;\n                                AssertionError</p>\n                            <pre><code>&gt;&gt;&gt; Value(4) and Value(2)\nTraceback (most recent call last):\n</code></pre>\n                            <p>&hellip;\n                                AssertionError</p>\n                            <pre><code>&gt;&gt;&gt; Value(5) and not Value(6)\nTraceback (most recent call last):\n</code></pre>\n                            <p>&hellip;\n                                AssertionError</p>\n                            <pre><code>&gt;&gt;&gt; 
Value('0x3f')\nValue(63)\n&gt;&gt;&gt; Value('077')\nValue(63)\n&gt;&gt;&gt; Value(\"'N'\")\nValue(78)\n&gt;&gt;&gt; Value(\"L'N'\")\nValue(78)\n&gt;&gt;&gt; Value(\"'\\n'\")\nValue(10)\n&gt;&gt;&gt; Value(\"'\\\\n'\")\nValue(10)\n&gt;&gt;&gt; Value(\"'\\\\'\")\nValue(92)\n&gt;&gt;&gt; Value(\"'\\'\")\nTraceback (most recent call last):\n</code></pre>\n                            <dl>\n                                <dt>&hellip;</dt>\n                                <dt><strong><code>SyntaxError</code></strong> :&ensp;<code>Empty</code>\n                                    <code>character</code> <code>escape</code> <code>sequence</code></dt>\n                                <dd>&nbsp;</dd>\n                            </dl>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">class Value(INTBASETYPE):\n    &#34;&#34;&#34;A signed or unsigned integer within a preprocessor expression, bounded\n    to within INT_MIN and INT_MAX, or 0 and UINT_MAX. 
Signed overflow is handled\n    like a two&#39;s complement CPU, despite being UB, as that&#39;s what GCC and clang do.\n    \n    &gt;&gt;&gt; Value(5)\n    Value(5)\n    &gt;&gt;&gt; Value(&#39;5L&#39;)\n    Value(5)\n    &gt;&gt;&gt; Value(&#39;5U&#39;)\n    Value(5U)\n    &gt;&gt;&gt; Value(&#39;0&#39;)\n    Value(0)\n    &gt;&gt;&gt; Value(&#39;0U&#39;)\n    Value(0U)\n    &gt;&gt;&gt; Value(&#39;-1U&#39;)\n    Value(18446744073709551615U)\n    &gt;&gt;&gt; Value(5) * Value(2)\n    Value(10)\n    &gt;&gt;&gt; Value(5) + Value(&#39;2u&#39;)\n    Value(7U)\n    &gt;&gt;&gt; Value(5) * 2\n    Value(10)\n    &gt;&gt;&gt; Value(5) / 2   # Must return integer\n    Value(2)\n    &gt;&gt;&gt; Value(50) % 8\n    Value(2)\n    &gt;&gt;&gt; -Value(5)\n    Value(-5)\n    &gt;&gt;&gt; +Value(-5)\n    Value(-5)\n    &gt;&gt;&gt; ~Value(5)\n    Value(-6)\n    &gt;&gt;&gt; Value(6) &amp; 2\n    Value(2)\n    &gt;&gt;&gt; Value(4) | 2\n    Value(6)\n    &gt;&gt;&gt; Value(6) ^ 2\n    Value(4)\n    &gt;&gt;&gt; Value(2) &lt;&lt; 2\n    Value(8)\n    &gt;&gt;&gt; Value(8) &gt;&gt; 2\n    Value(2)\n    &gt;&gt;&gt; Value(9223372036854775808)\n    Value(-9223372036854775808)\n    &gt;&gt;&gt; Value(-9223372036854775809)\n    Value(9223372036854775807)\n    &gt;&gt;&gt; Value(18446744073709551615)\n    Value(-1)\n    &gt;&gt;&gt; Value(False)\n    Value(0)\n    &gt;&gt;&gt; Value(True)\n    Value(1)\n    &gt;&gt;&gt; Value(5) == Value(6)\n    Value(0)\n    &gt;&gt;&gt; Value(5) == Value(5)\n    Value(1)\n    &gt;&gt;&gt; not Value(2)\n    Traceback (most recent call last):\n    ...\n    AssertionError\n    &gt;&gt;&gt; Value(4) and Value(2)\n    Traceback (most recent call last):\n    ...\n    AssertionError\n    &gt;&gt;&gt; Value(5) and not Value(6)\n    Traceback (most recent call last):\n    ...\n    AssertionError\n    &gt;&gt;&gt; Value(&#39;0x3f&#39;)\n    Value(63)\n    &gt;&gt;&gt; Value(&#39;077&#39;)\n    Value(63)\n    &gt;&gt;&gt; Value(&#34;&#39;N&#39;&#34;)\n    
Value(78)\n    &gt;&gt;&gt; Value(&#34;L&#39;N&#39;&#34;)\n    Value(78)\n    &gt;&gt;&gt; Value(&#34;&#39;\\\\n&#39;&#34;)\n    Value(10)\n    &gt;&gt;&gt; Value(&#34;&#39;\\\\\\\\n&#39;&#34;)\n    Value(10)\n    &gt;&gt;&gt; Value(&#34;&#39;\\\\\\\\&#39;&#34;)\n    Value(92)\n    &gt;&gt;&gt; Value(&#34;&#39;\\\\&#39;&#34;)\n    Traceback (most recent call last):\n    ...\n    SyntaxError: Empty character escape sequence\n    &#34;&#34;&#34;\n    INT_MIN = -(1 &lt;&lt; (INTMAXBITS - 1))\n    INT_MAX = (1 &lt;&lt; (INTMAXBITS - 1)) - 1\n    INT_MASK = (1 &lt;&lt; INTMAXBITS) - 1\n    UINT_MIN = 0\n    UINT_MAX = (1 &lt;&lt; INTMAXBITS) - 1\n    @classmethod\n    def __sclamp(cls, value):\n        value = INTBASETYPE(value)\n        return ((value - cls.INT_MIN) &amp; cls.INT_MASK) + cls.INT_MIN\n    @classmethod\n    def __uclamp(cls, value):\n        value = INTBASETYPE(value)\n        return value &amp; cls.UINT_MAX\n    def __new__(cls, value, unsigned = False, exception = None):\n        if isinstance(value, Value):\n            unsigned = value.unsigned\n            exception = value.exception\n        elif isinstance(value, INTBASETYPE) or isinstance(value, int) or isinstance(value, float):\n            value = cls.__uclamp(value) if unsigned else cls.__sclamp(value)\n        elif isinstance(value, STRING_TYPES):\n            if (value.startswith(&#34;L&#39;&#34;) or value[0] == &#34;&#39;&#34;) and value[-1] == &#34;&#39;&#34;:\n                startidx = 2 if value.startswith(&#34;L&#39;&#34;) else 1\n                #print(&#34;1. ***&#34;, value, file = sys.stderr)\n                value = value[startidx:-1]\n                if len(value) == 0:\n                    raise SyntaxError(&#39;Empty character escape sequence&#39;)\n                #print(&#34;2. ***&#34;, value, file = sys.stderr)\n                value = _expand_escape_sequences_pat.sub(lambda x: codecs.decode(x.group(0), &#39;unicode-escape&#39;), value)\n                #print(&#34;3. 
***&#34;, value, file = sys.stderr)\n                x = INTBASETYPE(ord(value))\n                #print(&#34;4. ***&#34;, x, file = sys.stderr)\n            elif value.startswith(&#39;0x&#39;) or value.startswith(&#39;0X&#39;):\n                # Strip any terminators\n                while not ((value[-1] &gt;= &#39;0&#39; and value[-1] &lt;= &#39;9&#39;) or (value[-1] &gt;= &#39;a&#39; and value[-1] &lt;= &#39;f&#39;) or (value[-1] &gt;= &#39;A&#39; and value[-1] &lt;= &#39;F&#39;)):\n                    if value[-1] == &#39;u&#39; or value[-1] == &#39;U&#39;:\n                        unsigned = True\n                    value = value[:-1]\n                x = INTBASETYPE(value, base = 16)\n            elif value.startswith(&#39;0&#39;):\n                # Strip any terminators\n                while not (value[-1] &gt;= &#39;0&#39; and value[-1] &lt;= &#39;7&#39;):\n                    if value[-1] == &#39;u&#39; or value[-1] == &#39;U&#39;:\n                        unsigned = True\n                    value = value[:-1]\n                x = INTBASETYPE(value, base = 8)\n            else:\n                # Strip any terminators\n                while not (value[-1] &gt;= &#39;0&#39; and value[-1] &lt;= &#39;9&#39;):\n                    if value[-1] == &#39;u&#39; or value[-1] == &#39;U&#39;:\n                        unsigned = True\n                    value = value[:-1]\n                x = INTBASETYPE(value)\n            value = cls.__uclamp(x) if unsigned else cls.__sclamp(x)\n            #assert x == value\n        else:\n            print(&#39;Unknown value type: %s&#39; % repr(type(value)), file = sys.stderr)\n            assert False  # Input is an unrecognised type\n        inst = super(Value, cls).__new__(cls, value)\n        inst.unsigned = unsigned\n        inst.exception = exception\n        return inst\n    def value(self):\n        if self.exception is not None:\n            raise self.exception\n        return INTBASETYPE(self)\n    def 
__add__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) + self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__add__(other))\n    def __sub__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) - self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__sub__(other))\n    def __mul__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) * self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__mul__(other))\n    def __div__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) / self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__div__(other))\n    def __truediv__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) / self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__truediv__(other))\n    def __mod__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) % self.__uclamp(other), 
True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__mod__(other))\n    def __neg__(self):\n        if self.exception is not None:\n            return self\n        return Value(super(Value, self).__neg__(), self.unsigned)\n    def __invert__(self):\n        if self.exception is not None:\n            return self\n        return Value(super(Value, self).__invert__(), self.unsigned)\n    def __and__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) &amp; self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__and__(other))\n    def __or__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) | self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__or__(other))\n    def __pos__(self):\n        if self.exception is not None:\n            return self\n        return Value(super(Value, self).__pos__())\n    def __pow__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) ** self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__pow__(other))\n    def __lshift__(self, other):\n        if self.exception is not None:\n            return self\n        # Ignore other signedness\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) &lt;&lt; self.__uclamp(other), True) if (self.unsigned) else Value(super(Value, self).__lshift__(other))\n    def __rshift__(self, 
other):\n        if self.exception is not None:\n            return self\n        # Ignore other signedness\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) &gt;&gt; self.__uclamp(other), True) if (self.unsigned) else Value(super(Value, self).__rshift__(other))\n    def __xor__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) ^ self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__xor__(other))\n    def __repr__(self):\n        if self.exception is not None:\n            return &#34;Exception(%s)&#34; % repr(self.exception)\n        elif self.unsigned:\n            return &#34;Value(%dU)&#34; % INTBASETYPE(self)\n        else:\n            return &#34;Value(%d)&#34; % INTBASETYPE(self)\n    def __bool__(self):\n        assert False  # Do not use Python logical operations\n    def __nonzero__(self):\n        assert False  # Do not use Python logical operations\n    def __cmp__(self, other):\n        assert False\n    def __lt__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) &lt; self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(self.__sclamp(self) &lt; self.__sclamp(other), False)\n    def __le__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) &lt;= self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(self.__sclamp(self) &lt;= self.__sclamp(other), False)\n    def __eq__(self, other):\n  
      if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) == self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(self.__sclamp(self) == self.__sclamp(other), False)\n    def __ne__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) != self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(self.__sclamp(self) != self.__sclamp(other), False)\n    def __ge__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) &gt;= self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(self.__sclamp(self) &gt;= self.__sclamp(other), False)\n    def __gt__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) &gt; self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(self.__sclamp(self) &gt; self.__sclamp(other), False)</code></pre>\n                        </details>\n                        <h3>Class variables</h3>\n                        <dl>\n                            <dt id=\"pcpp.evaluator.Value.INT_MASK\"><code\n                                    class=\"name\">var <span class=\"ident\">INT_MASK</span></code></dt>\n                            <dd>\n                                <section class=\"desc\"></section>\n                            </dd>\n                            <dt id=\"pcpp.evaluator.Value.INT_MAX\"><code\n                                    
class=\"name\">var <span class=\"ident\">INT_MAX</span></code></dt>\n                            <dd>\n                                <section class=\"desc\"></section>\n                            </dd>\n                            <dt id=\"pcpp.evaluator.Value.INT_MIN\"><code\n                                    class=\"name\">var <span class=\"ident\">INT_MIN</span></code></dt>\n                            <dd>\n                                <section class=\"desc\"></section>\n                            </dd>\n                            <dt id=\"pcpp.evaluator.Value.UINT_MAX\"><code\n                                    class=\"name\">var <span class=\"ident\">UINT_MAX</span></code></dt>\n                            <dd>\n                                <section class=\"desc\"></section>\n                            </dd>\n                            <dt id=\"pcpp.evaluator.Value.UINT_MIN\"><code\n                                    class=\"name\">var <span class=\"ident\">UINT_MIN</span></code></dt>\n                            <dd>\n                                <section class=\"desc\"></section>\n                            </dd>\n                        </dl>\n                        <h3>Methods</h3>\n                        <dl>\n                            <dt id=\"pcpp.evaluator.Value.value\"><code class=\"name flex\">\n<span>def <span class=\"ident\">value</span></span>(<span>self)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\"></section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def value(self):\n    if self.exception is not None:\n        raise self.exception\n    return INTBASETYPE(self)</code></pre>\n                                </details>\n                            </dd>\n                        </dl>\n                    
</dd>\n                </dl>\n            </section>\n        </article>\n        <nav id=\"sidebar\">\n            <h1>Index</h1>\n            <div class=\"toc\">\n                <ul></ul>\n            </div>\n            <ul id=\"index\">\n                <li>\n                    <h3>Super-module</h3>\n                    <ul>\n                        <li><code><a title=\"pcpp\" href=\"index.html\">pcpp</a></code></li>\n                    </ul>\n                </li>\n                <li>\n                    <h3><a href=\"#header-functions\">Functions</a></h3>\n                    <ul class=\"\">\n                        <li><code><a title=\"pcpp.evaluator.p_error\" href=\"#pcpp.evaluator.p_error\">p_error</a></code>\n                        </li>\n                        <li><code><a title=\"pcpp.evaluator.p_expression_binop\" href=\"#pcpp.evaluator.p_expression_binop\">p_expression_binop</a></code>\n                        </li>\n                        <li><code><a title=\"pcpp.evaluator.p_expression_character\" href=\"#pcpp.evaluator.p_expression_character\">p_expression_character</a></code>\n                        </li>\n                        <li><code><a title=\"pcpp.evaluator.p_expression_conditional\" href=\"#pcpp.evaluator.p_expression_conditional\">p_expression_conditional</a></code>\n                        </li>\n                        <li><code><a title=\"pcpp.evaluator.p_expression_function_call\" href=\"#pcpp.evaluator.p_expression_function_call\">p_expression_function_call</a></code>\n                        </li>\n                        <li><code><a title=\"pcpp.evaluator.p_expression_group\" href=\"#pcpp.evaluator.p_expression_group\">p_expression_group</a></code>\n                        </li>\n                        <li><code><a title=\"pcpp.evaluator.p_expression_identifier\" href=\"#pcpp.evaluator.p_expression_identifier\">p_expression_identifier</a></code>\n                        </li>\n                        <li><code><a 
title=\"pcpp.evaluator.p_expression_number\" href=\"#pcpp.evaluator.p_expression_number\">p_expression_number</a></code>\n                        </li>\n                        <li><code><a title=\"pcpp.evaluator.p_expression_string\" href=\"#pcpp.evaluator.p_expression_string\">p_expression_string</a></code>\n                        </li>\n                        <li><code><a title=\"pcpp.evaluator.p_expression_uminus\" href=\"#pcpp.evaluator.p_expression_uminus\">p_expression_uminus</a></code>\n                        </li>\n                        <li><code><a title=\"pcpp.evaluator.p_expression_unop\" href=\"#pcpp.evaluator.p_expression_unop\">p_expression_unop</a></code>\n                        </li>\n                        <li><code><a title=\"pcpp.evaluator.p_expression_uplus\" href=\"#pcpp.evaluator.p_expression_uplus\">p_expression_uplus</a></code>\n                        </li>\n                    </ul>\n                </li>\n                <li>\n                    <h3><a href=\"#header-classes\">Classes</a></h3>\n                    <ul>\n                        <li>\n                            <h4><code><a title=\"pcpp.evaluator.Evaluator\" href=\"#pcpp.evaluator.Evaluator\">Evaluator</a></code>\n                            </h4>\n                            <ul class=\"\">\n                                <li><code><a title=\"pcpp.evaluator.Evaluator.__init__\" href=\"#pcpp.evaluator.Evaluator.__init__\">__init__</a></code>\n                                </li>\n                            </ul>\n                        </li>\n                        <li>\n                            <h4><code><a title=\"pcpp.evaluator.Value\" href=\"#pcpp.evaluator.Value\">Value</a></code></h4>\n                            <ul class=\"two-column\">\n                                <li><code><a title=\"pcpp.evaluator.Value.INT_MASK\" href=\"#pcpp.evaluator.Value.INT_MASK\">INT_MASK</a></code>\n                                </li>\n                              
  <li><code><a title=\"pcpp.evaluator.Value.INT_MAX\" href=\"#pcpp.evaluator.Value.INT_MAX\">INT_MAX</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.evaluator.Value.INT_MIN\" href=\"#pcpp.evaluator.Value.INT_MIN\">INT_MIN</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.evaluator.Value.UINT_MAX\" href=\"#pcpp.evaluator.Value.UINT_MAX\">UINT_MAX</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.evaluator.Value.UINT_MIN\" href=\"#pcpp.evaluator.Value.UINT_MIN\">UINT_MIN</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.evaluator.Value.value\" href=\"#pcpp.evaluator.Value.value\">value</a></code>\n                                </li>\n                            </ul>\n                        </li>\n                    </ul>\n                </li>\n            </ul>\n        </nav>\n    </main>\n    <footer id=\"footer\">\n        <p>Generated by <a href=\"https://pdoc3.github.io/pdoc\"><cite>pdoc</cite> 0.5.3</a>.</p>\n    </footer>\n    <script src=\"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js\"></script>\n    <script>hljs.initHighlightingOnLoad()</script>\n</body>\n\n</html>"
  },
  {
    "path": "doc/index.html",
    "content": "<!doctype html>\n<html lang=\"en\">\n<head>\n<meta charset=\"utf-8\">\n<meta name=\"viewport\" content=\"width=device-width, initial-scale=1, minimum-scale=1\" />\n<meta name=\"generator\" content=\"pdoc 0.5.3\" />\n<title>pcpp API documentation</title>\n<meta name=\"description\" content=\"\" />\n<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>\n<link href='https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/8.0.0/sanitize.min.css' rel='stylesheet'>\n<link href=\"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/github.min.css\" rel=\"stylesheet\">\n<style>.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^=\"header-\"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{font-weight:bold}#index h4 + 
ul{margin-bottom:.6em}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}.name > span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.name small{font-weight:normal}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase;cursor:pointer}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}.admonition{padding:.1em .5em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>\n<style media=\"screen and (min-width: 700px)\">@media screen and (min-width:700px){#sidebar{width:30%}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>\n<style media=\"print\">@media 
print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:\" (\" attr(href) \")\";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:\" (\" attr(title) \")\"}.ir a:after,a[href^=\"javascript:\"]:after,a[href^=\"#\"]:after{content:\"\"}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>\n</head>\n<body>\n<main>\n<article id=\"content\">\n<header>\n<h1 class=\"title\"><code>pcpp</code> module</h1>\n</header>\n<section id=\"section-intro\">\n<details class=\"source\">\n<summary>Source code</summary>\n<pre><code class=\"python\">from .evaluator import Evaluator\nfrom .parser import Action, OutputDirective\nfrom .pcmd import main, version, CmdPreprocessor\nfrom .preprocessor import Preprocessor\n__version__ = version</code></pre>\n</details>\n</section>\n<section>\n<h2 class=\"section-title\" id=\"header-submodules\">Sub-modules</h2>\n<dl>\n<dt><code class=\"name\"><a title=\"pcpp.evaluator\" href=\"evaluator.html\">pcpp.evaluator</a></code></dt>\n<dd>\n<section class=\"desc\"></section>\n</dd>\n<dt><code class=\"name\"><a title=\"pcpp.lextab\" href=\"lextab.html\">pcpp.lextab</a></code></dt>\n<dd>\n<section class=\"desc\"></section>\n</dd>\n<dt><code class=\"name\"><a title=\"pcpp.parser\" href=\"parser.html\">pcpp.parser</a></code></dt>\n<dd>\n<section class=\"desc\"></section>\n</dd>\n<dt><code class=\"name\"><a title=\"pcpp.parsetab\" href=\"parsetab.html\">pcpp.parsetab</a></code></dt>\n<dd>\n<section class=\"desc\"></section>\n</dd>\n<dt><code class=\"name\"><a title=\"pcpp.pcmd\" href=\"pcmd.html\">pcpp.pcmd</a></code></dt>\n<dd>\n<section class=\"desc\"></section>\n</dd>\n<dt><code 
class=\"name\"><a title=\"pcpp.preprocessor\" href=\"preprocessor.html\">pcpp.preprocessor</a></code></dt>\n<dd>\n<section class=\"desc\"></section>\n</dd>\n</dl>\n</section>\n<section>\n</section>\n<section>\n</section>\n<section>\n</section>\n</article>\n<nav id=\"sidebar\">\n<h1>Index</h1>\n<div class=\"toc\">\n<ul></ul>\n</div>\n<ul id=\"index\">\n<li><h3><a href=\"#header-submodules\">Sub-modules</a></h3>\n<ul>\n<li><code><a title=\"pcpp.evaluator\" href=\"evaluator.html\">pcpp.evaluator</a></code></li>\n<li><code><a title=\"pcpp.lextab\" href=\"lextab.html\">pcpp.lextab</a></code></li>\n<li><code><a title=\"pcpp.parser\" href=\"parser.html\">pcpp.parser</a></code></li>\n<li><code><a title=\"pcpp.parsetab\" href=\"parsetab.html\">pcpp.parsetab</a></code></li>\n<li><code><a title=\"pcpp.pcmd\" href=\"pcmd.html\">pcpp.pcmd</a></code></li>\n<li><code><a title=\"pcpp.preprocessor\" href=\"preprocessor.html\">pcpp.preprocessor</a></code></li>\n</ul>\n</li>\n</ul>\n</nav>\n</main>\n<footer id=\"footer\">\n<p>Generated by <a href=\"https://pdoc3.github.io/pdoc\"><cite>pdoc</cite> 0.5.3</a>.</p>\n</footer>\n<script src=\"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js\"></script>\n<script>hljs.initHighlightingOnLoad()</script>\n</body>\n</html>"
  },
  {
    "path": "doc/lextab.html",
    "content": "<!doctype html>\n<html lang=\"en\">\n<head>\n<meta charset=\"utf-8\">\n<meta name=\"viewport\" content=\"width=device-width, initial-scale=1, minimum-scale=1\" />\n<meta name=\"generator\" content=\"pdoc 0.5.3\" />\n<title>pcpp.lextab API documentation</title>\n<meta name=\"description\" content=\"\" />\n<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>\n<link href='https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/8.0.0/sanitize.min.css' rel='stylesheet'>\n<link href=\"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/github.min.css\" rel=\"stylesheet\">\n<style>.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^=\"header-\"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{font-weight:bold}#index h4 + 
ul{margin-bottom:.6em}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}.name > span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.name small{font-weight:normal}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase;cursor:pointer}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}.admonition{padding:.1em .5em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>\n<style media=\"screen and (min-width: 700px)\">@media screen and (min-width:700px){#sidebar{width:30%}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>\n<style media=\"print\">@media 
print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:\" (\" attr(href) \")\";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:\" (\" attr(title) \")\"}.ir a:after,a[href^=\"javascript:\"]:after,a[href^=\"#\"]:after{content:\"\"}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>\n</head>\n<body>\n<main>\n<article id=\"content\">\n<header>\n<h1 class=\"title\"><code>pcpp.lextab</code> module</h1>\n</header>\n<section id=\"section-intro\">\n<details class=\"source\">\n<summary>Source code</summary>\n<pre><code class=\"python\"># lextab.py. This file automatically created by PLY (version 3.11). Don&#39;t edit!\n_tabversion   = &#39;3.10&#39;\n_lextokens    = set((&#39;CPP_AMPERSAND&#39;, &#39;CPP_ANDEQUAL&#39;, &#39;CPP_BAR&#39;, &#39;CPP_BSLASH&#39;, &#39;CPP_CHAR&#39;, &#39;CPP_COLON&#39;, &#39;CPP_COMMA&#39;, &#39;CPP_COMMENT1&#39;, &#39;CPP_COMMENT2&#39;, &#39;CPP_DEREFERENCE&#39;, &#39;CPP_DIVIDEEQUAL&#39;, &#39;CPP_DOT&#39;, &#39;CPP_DPOUND&#39;, &#39;CPP_DQUOTE&#39;, &#39;CPP_EQUAL&#39;, &#39;CPP_EQUALITY&#39;, &#39;CPP_EXCLAMATION&#39;, &#39;CPP_FLOAT&#39;, &#39;CPP_FSLASH&#39;, &#39;CPP_GREATER&#39;, &#39;CPP_GREATEREQUAL&#39;, &#39;CPP_HAT&#39;, &#39;CPP_ID&#39;, &#39;CPP_INEQUALITY&#39;, &#39;CPP_INTEGER&#39;, &#39;CPP_LBRACKET&#39;, &#39;CPP_LCURLY&#39;, &#39;CPP_LESS&#39;, &#39;CPP_LESSEQUAL&#39;, &#39;CPP_LINECONT&#39;, &#39;CPP_LOGICALAND&#39;, &#39;CPP_LOGICALOR&#39;, &#39;CPP_LPAREN&#39;, &#39;CPP_LSHIFT&#39;, &#39;CPP_LSHIFTEQUAL&#39;, &#39;CPP_MINUS&#39;, &#39;CPP_MINUSEQUAL&#39;, &#39;CPP_MINUSMINUS&#39;, &#39;CPP_MULTIPLYEQUAL&#39;, &#39;CPP_OREQUAL&#39;, 
&#39;CPP_PERCENT&#39;, &#39;CPP_PERCENTEQUAL&#39;, &#39;CPP_PLUS&#39;, &#39;CPP_PLUSEQUAL&#39;, &#39;CPP_PLUSPLUS&#39;, &#39;CPP_POUND&#39;, &#39;CPP_QUESTION&#39;, &#39;CPP_RBRACKET&#39;, &#39;CPP_RCURLY&#39;, &#39;CPP_RPAREN&#39;, &#39;CPP_RSHIFT&#39;, &#39;CPP_RSHIFTEQUAL&#39;, &#39;CPP_SEMICOLON&#39;, &#39;CPP_SQUOTE&#39;, &#39;CPP_STAR&#39;, &#39;CPP_STRING&#39;, &#39;CPP_TILDE&#39;, &#39;CPP_WS&#39;, &#39;CPP_XOREQUAL&#39;))\n_lexreflags   = 64\n_lexliterals  = &#39;+-*/%|&amp;~^&lt;&gt;=!?()[]{}.,;:\\\\\\&#39;&#34;&#39;\n_lexstateinfo = {&#39;INITIAL&#39;: &#39;inclusive&#39;}\n_lexstatere   = {&#39;INITIAL&#39;: [(&#39;(?P&lt;t_CPP_WS&gt;([ \\\\t]+|\\\\n))|(?P&lt;t_CPP_LINECONT&gt;\\\\\\\\[ \\\\t]*\\\\n)|(?P&lt;t_CPP_INTEGER&gt;(((((0x)|(0X))[0-9a-fA-F]+)|(\\\\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?))|(?P&lt;t_CPP_STRING&gt;\\\\&#34;([^\\\\\\\\\\\\n]|(\\\\\\\\(.|\\\\n)))*?\\\\&#34;)|(?P&lt;t_CPP_CHAR&gt;(L)?\\\\\\&#39;([^\\\\\\\\\\\\n]|(\\\\\\\\(.|\\\\n)))*?\\\\\\&#39;)|(?P&lt;t_CPP_COMMENT1&gt;(/\\\\*(.|\\\\n)*?\\\\*/))|(?P&lt;t_CPP_COMMENT2&gt;(//[^\\\\n]*))|(?P&lt;t_CPP_FLOAT&gt;((\\\\d+)(\\\\.\\\\d+)(e(\\\\+|-)?(\\\\d+))?|(\\\\d+)e(\\\\+|-)?(\\\\d+))([lL]|[fF])?)|(?P&lt;t_CPP_ID&gt;[A-Za-z_][\\\\w_]*)|(?P&lt;t_CPP_LOGICALOR&gt;\\\\|\\\\|)|(?P&lt;t_CPP_PLUSPLUS&gt;\\\\+\\\\+)|(?P&lt;t_CPP_DPOUND&gt;\\\\#\\\\#)|(?P&lt;t_CPP_LSHIFTEQUAL&gt;&lt;&lt;=)|(?P&lt;t_CPP_OREQUAL&gt;\\\\|=)|(?P&lt;t_CPP_PLUSEQUAL&gt;\\\\+=)|(?P&lt;t_CPP_RSHIFTEQUAL&gt;&gt;&gt;=)|(?P&lt;t_CPP_MULTIPLYEQUAL&gt;\\\\*=)|(?P&lt;t_CPP_BAR&gt;\\\\|)|(?P&lt;t_CPP_DIVIDEEQUAL&gt;/=)|(?P&lt;t_CPP_POUND&gt;\\\\#)|(?P&lt;t_CPP_PERCENTEQUAL&gt;%=)|(?P&lt;t_CPP_DEREFERENCE&gt;-&gt;)|(?P&lt;t_CPP_RPAREN&gt;\\\\))|(?P&lt;t_CPP_ANDEQUAL&gt;&amp;=)|(?P&lt;t_CPP_RBRACKET&gt;\\\\])|(?P&lt;t_CPP_LPAREN&gt;\\\\()|(?P&lt;t_CPP_RSHIFT&gt;&gt;&gt;)|(?P&lt;t_CPP_LESSEQUAL&gt;&lt;=)|(?P&lt;t_CPP_HAT&gt;\\\\^)|(?P&lt;t_CPP_LOGICALAND&gt;&amp;&amp;)|(?P&lt;t_CPP_EQUALITY&gt;==)|(?P&lt;t_CPP_GREATEREQUAL&gt;&gt;=)|(?
P&lt;t_CPP_BSLASH&gt;\\\\\\\\)|(?P&lt;t_CPP_MINUSEQUAL&gt;-=)|(?P&lt;t_CPP_DOT&gt;\\\\.)|(?P&lt;t_CPP_MINUSMINUS&gt;--)|(?P&lt;t_CPP_LBRACKET&gt;\\\\[)|(?P&lt;t_CPP_PLUS&gt;\\\\+)|(?P&lt;t_CPP_XOREQUAL&gt;^=)|(?P&lt;t_CPP_STAR&gt;\\\\*)|(?P&lt;t_CPP_QUESTION&gt;\\\\?)|(?P&lt;t_CPP_LSHIFT&gt;&lt;&lt;)|(?P&lt;t_CPP_INEQUALITY&gt;!=)|(?P&lt;t_CPP_DQUOTE&gt;&#34;)|(?P&lt;t_CPP_MINUS&gt;-)|(?P&lt;t_CPP_RCURLY&gt;})|(?P&lt;t_CPP_GREATER&gt;&gt;)|(?P&lt;t_CPP_LESS&gt;&lt;)|(?P&lt;t_CPP_SQUOTE&gt;\\&#39;)|(?P&lt;t_CPP_EXCLAMATION&gt;!)|(?P&lt;t_CPP_LCURLY&gt;{)|(?P&lt;t_CPP_EQUAL&gt;=)|(?P&lt;t_CPP_FSLASH&gt;/)|(?P&lt;t_CPP_COLON&gt;:)|(?P&lt;t_CPP_AMPERSAND&gt;&amp;)|(?P&lt;t_CPP_COMMA&gt;,)|(?P&lt;t_CPP_TILDE&gt;~)|(?P&lt;t_CPP_SEMICOLON&gt;;)|(?P&lt;t_CPP_PERCENT&gt;%)&#39;, [None, (&#39;t_CPP_WS&#39;, &#39;CPP_WS&#39;), None, (&#39;t_CPP_LINECONT&#39;, &#39;CPP_LINECONT&#39;), (&#39;t_CPP_INTEGER&#39;, &#39;CPP_INTEGER&#39;), None, None, None, None, None, None, None, None, (&#39;t_CPP_STRING&#39;, &#39;CPP_STRING&#39;), None, None, None, (&#39;t_CPP_CHAR&#39;, &#39;CPP_CHAR&#39;), None, None, None, None, (&#39;t_CPP_COMMENT1&#39;, &#39;CPP_COMMENT1&#39;), None, None, (&#39;t_CPP_COMMENT2&#39;, &#39;CPP_COMMENT2&#39;), None, (None, &#39;CPP_FLOAT&#39;), None, None, None, None, None, None, None, None, None, None, (None, &#39;CPP_ID&#39;), (None, &#39;CPP_LOGICALOR&#39;), (None, &#39;CPP_PLUSPLUS&#39;), (None, &#39;CPP_DPOUND&#39;), (None, &#39;CPP_LSHIFTEQUAL&#39;), (None, &#39;CPP_OREQUAL&#39;), (None, &#39;CPP_PLUSEQUAL&#39;), (None, &#39;CPP_RSHIFTEQUAL&#39;), (None, &#39;CPP_MULTIPLYEQUAL&#39;), (None, &#39;CPP_BAR&#39;), (None, &#39;CPP_DIVIDEEQUAL&#39;), (None, &#39;CPP_POUND&#39;), (None, &#39;CPP_PERCENTEQUAL&#39;), (None, &#39;CPP_DEREFERENCE&#39;), (None, &#39;CPP_RPAREN&#39;), (None, &#39;CPP_ANDEQUAL&#39;), (None, &#39;CPP_RBRACKET&#39;), (None, &#39;CPP_LPAREN&#39;), (None, &#39;CPP_RSHIFT&#39;), (None, &#39;CPP_LESSEQUAL&#39;), (None, &#39;CPP_HAT&#39;), 
(None, &#39;CPP_LOGICALAND&#39;), (None, &#39;CPP_EQUALITY&#39;), (None, &#39;CPP_GREATEREQUAL&#39;), (None, &#39;CPP_BSLASH&#39;), (None, &#39;CPP_MINUSEQUAL&#39;), (None, &#39;CPP_DOT&#39;), (None, &#39;CPP_MINUSMINUS&#39;), (None, &#39;CPP_LBRACKET&#39;), (None, &#39;CPP_PLUS&#39;), (None, &#39;CPP_XOREQUAL&#39;), (None, &#39;CPP_STAR&#39;), (None, &#39;CPP_QUESTION&#39;), (None, &#39;CPP_LSHIFT&#39;), (None, &#39;CPP_INEQUALITY&#39;), (None, &#39;CPP_DQUOTE&#39;), (None, &#39;CPP_MINUS&#39;), (None, &#39;CPP_RCURLY&#39;), (None, &#39;CPP_GREATER&#39;), (None, &#39;CPP_LESS&#39;), (None, &#39;CPP_SQUOTE&#39;), (None, &#39;CPP_EXCLAMATION&#39;), (None, &#39;CPP_LCURLY&#39;), (None, &#39;CPP_EQUAL&#39;), (None, &#39;CPP_FSLASH&#39;), (None, &#39;CPP_COLON&#39;), (None, &#39;CPP_AMPERSAND&#39;), (None, &#39;CPP_COMMA&#39;), (None, &#39;CPP_TILDE&#39;), (None, &#39;CPP_SEMICOLON&#39;), (None, &#39;CPP_PERCENT&#39;)])]}\n_lexstateignore = {&#39;INITIAL&#39;: &#39;&#39;}\n_lexstateerrorf = {&#39;INITIAL&#39;: &#39;t_error&#39;}\n_lexstateeoff = {}</code></pre>\n</details>\n</section>\n<section>\n</section>\n<section>\n</section>\n<section>\n</section>\n<section>\n</section>\n</article>\n<nav id=\"sidebar\">\n<h1>Index</h1>\n<div class=\"toc\">\n<ul></ul>\n</div>\n<ul id=\"index\">\n<li><h3>Super-module</h3>\n<ul>\n<li><code><a title=\"pcpp\" href=\"index.html\">pcpp</a></code></li>\n</ul>\n</li>\n</ul>\n</nav>\n</main>\n<footer id=\"footer\">\n<p>Generated by <a href=\"https://pdoc3.github.io/pdoc\"><cite>pdoc</cite> 0.5.3</a>.</p>\n</footer>\n<script src=\"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js\"></script>\n<script>hljs.initHighlightingOnLoad()</script>\n</body>\n</html>"
  },
  {
    "path": "doc/parser.html",
    "content": "<!doctype html>\n<html lang=\"en\">\n\n<head>\n    <meta charset=\"utf-8\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, minimum-scale=1\" />\n    <meta name=\"generator\" content=\"pdoc 0.5.3\" />\n    <title>pcpp.parser API documentation</title>\n    <meta name=\"description\" content=\"\" />\n    <link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>\n    <link href='https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/8.0.0/sanitize.min.css' rel='stylesheet'>\n    <link href=\"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/github.min.css\" rel=\"stylesheet\">\n    <style>\n        .flex {\n            display: flex !important\n        }\n\n        body {\n            line-height: 1.5em\n        }\n\n        #content {\n            padding: 20px\n        }\n\n        #sidebar {\n            padding: 30px;\n            overflow: hidden\n        }\n\n        .http-server-breadcrumbs {\n            font-size: 130%;\n            margin: 0 0 15px 0\n        }\n\n        #footer {\n            font-size: .75em;\n            padding: 5px 30px;\n            border-top: 1px solid #ddd;\n            text-align: right\n        }\n\n        #footer p {\n            margin: 0 0 0 1em;\n            display: inline-block\n        }\n\n        #footer p:last-child {\n            margin-right: 30px\n        }\n\n        h1,\n        h2,\n        h3,\n        h4,\n        h5 {\n            font-weight: 300\n        }\n\n        h1 {\n            font-size: 2.5em;\n            line-height: 1.1em\n        }\n\n        h2 {\n            font-size: 1.75em;\n            margin: 1em 0 .50em 0\n        }\n\n        h3 {\n            font-size: 1.4em;\n            margin: 25px 0 10px 0\n        }\n\n        h4 {\n            margin: 0;\n            font-size: 105%\n        }\n\n        a {\n            color: #058;\n            text-decoration: none;\n            
transition: color .3s ease-in-out\n        }\n\n        a:hover {\n            color: #e82\n        }\n\n        .title code {\n            font-weight: bold\n        }\n\n        h2[id^=\"header-\"] {\n            margin-top: 2em\n        }\n\n        .ident {\n            color: #900\n        }\n\n        pre code {\n            background: #f8f8f8;\n            font-size: .8em;\n            line-height: 1.4em\n        }\n\n        code {\n            background: #f2f2f1;\n            padding: 1px 4px;\n            overflow-wrap: break-word\n        }\n\n        h1 code {\n            background: transparent\n        }\n\n        pre {\n            background: #f8f8f8;\n            border: 0;\n            border-top: 1px solid #ccc;\n            border-bottom: 1px solid #ccc;\n            margin: 1em 0;\n            padding: 1ex\n        }\n\n        #http-server-module-list {\n            display: flex;\n            flex-flow: column\n        }\n\n        #http-server-module-list div {\n            display: flex\n        }\n\n        #http-server-module-list dt {\n            min-width: 10%\n        }\n\n        #http-server-module-list p {\n            margin-top: 0\n        }\n\n        .toc ul,\n        #index {\n            list-style-type: none;\n            margin: 0;\n            padding: 0\n        }\n\n        #index code {\n            background: transparent\n        }\n\n        #index h3 {\n            border-bottom: 1px solid #ddd\n        }\n\n        #index ul {\n            padding: 0\n        }\n\n        #index h4 {\n            font-weight: bold\n        }\n\n        #index h4+ul {\n            margin-bottom: .6em\n        }\n\n        @media (min-width:200ex) {\n            #index .two-column {\n                column-count: 2\n            }\n        }\n\n        @media (min-width:300ex) {\n            #index .two-column {\n                column-count: 3\n            }\n        }\n\n        dl {\n            margin-bottom: 2em\n        
}\n\n        dl dl:last-child {\n            margin-bottom: 4em\n        }\n\n        dd {\n            margin: 0 0 1em 3em\n        }\n\n        #header-classes+dl>dd {\n            margin-bottom: 3em\n        }\n\n        dd dd {\n            margin-left: 2em\n        }\n\n        dd p {\n            margin: 10px 0\n        }\n\n        .name {\n            background: #eee;\n            font-weight: bold;\n            font-size: .85em;\n            padding: 5px 10px;\n            display: inline-block;\n            min-width: 40%\n        }\n\n        .name:hover {\n            background: #e0e0e0\n        }\n\n        .name>span:first-child {\n            white-space: nowrap\n        }\n\n        .name.class>span:nth-child(2) {\n            margin-left: .4em\n        }\n\n        .name small {\n            font-weight: normal\n        }\n\n        .inherited {\n            color: #999;\n            border-left: 5px solid #eee;\n            padding-left: 1em\n        }\n\n        .inheritance em {\n            font-style: normal;\n            font-weight: bold\n        }\n\n        .desc h2 {\n            font-weight: 400;\n            font-size: 1.25em\n        }\n\n        .desc h3 {\n            font-size: 1em\n        }\n\n        .desc dt code {\n            background: inherit\n        }\n\n        .source summary {\n            color: #666;\n            text-align: right;\n            font-weight: 400;\n            font-size: .8em;\n            text-transform: uppercase;\n            cursor: pointer\n        }\n\n        .source pre {\n            max-height: 500px;\n            overflow: auto;\n            margin: 0\n        }\n\n        .source pre code {\n            font-size: 12px;\n            overflow: visible\n        }\n\n        .hlist {\n            list-style: none\n        }\n\n        .hlist li {\n            display: inline\n        }\n\n        .hlist li:after {\n            content: ',\\2002'\n        }\n\n        .hlist 
li:last-child:after {\n            content: none\n        }\n\n        .hlist .hlist {\n            display: inline;\n            padding-left: 1em\n        }\n\n        img {\n            max-width: 100%\n        }\n\n        .admonition {\n            padding: .1em .5em\n        }\n\n        .admonition-title {\n            font-weight: bold\n        }\n\n        .admonition.note,\n        .admonition.info,\n        .admonition.important {\n            background: #aef\n        }\n\n        .admonition.todo,\n        .admonition.versionadded,\n        .admonition.tip,\n        .admonition.hint {\n            background: #dfd\n        }\n\n        .admonition.warning,\n        .admonition.versionchanged,\n        .admonition.deprecated {\n            background: #fd4\n        }\n\n        .admonition.error,\n        .admonition.danger,\n        .admonition.caution {\n            background: lightpink\n        }\n    </style>\n    <style media=\"screen and (min-width: 700px)\">\n        @media screen and (min-width:700px) {\n            #sidebar {\n                width: 30%\n            }\n\n            #content {\n                width: 70%;\n                max-width: 100ch;\n                padding: 3em 4em;\n                border-left: 1px solid #ddd\n            }\n\n            pre code {\n                font-size: 1em\n            }\n\n            .item .name {\n                font-size: 1em\n            }\n\n            main {\n                display: flex;\n                flex-direction: row-reverse;\n                justify-content: flex-end\n            }\n\n            .toc ul ul,\n            #index ul {\n                padding-left: 1.5em\n            }\n\n            .toc>ul>li {\n                margin-top: .5em\n            }\n        }\n    </style>\n    <style media=\"print\">\n        @media print {\n            #sidebar h1 {\n                page-break-before: always\n            }\n\n            .source {\n                display: 
none\n            }\n        }\n\n        @media print {\n            * {\n                background: transparent !important;\n                color: #000 !important;\n                box-shadow: none !important;\n                text-shadow: none !important\n            }\n\n            a[href]:after {\n                content: \" (\" attr(href) \")\";\n                font-size: 90%\n            }\n\n            a[href][title]:after {\n                content: none\n            }\n\n            abbr[title]:after {\n                content: \" (\" attr(title) \")\"\n            }\n\n            .ir a:after,\n            a[href^=\"javascript:\"]:after,\n            a[href^=\"#\"]:after {\n                content: \"\"\n            }\n\n            pre,\n            blockquote {\n                border: 1px solid #999;\n                page-break-inside: avoid\n            }\n\n            thead {\n                display: table-header-group\n            }\n\n            tr,\n            img {\n                page-break-inside: avoid\n            }\n\n            img {\n                max-width: 100% !important\n            }\n\n            @page {\n                margin: 0.5cm\n            }\n\n            p,\n            h2,\n            h3 {\n                orphans: 3;\n                widows: 3\n            }\n\n            h1,\n            h2,\n            h3,\n            h4,\n            h5,\n            h6 {\n                page-break-after: avoid\n            }\n        }\n    </style>\n</head>\n\n<body>\n    <main>\n        <article id=\"content\">\n            <header>\n                <h1 class=\"title\"><code>pcpp.parser</code> module</h1>\n            </header>\n            <section id=\"section-intro\">\n                <details class=\"source\">\n                    <summary>Source code</summary>\n                    <pre><code class=\"python\">#!/usr/bin/python\n# Python C99 conforming preprocessor parser config\n# (C) 2017-2026 Niall Douglas 
http://www.nedproductions.biz/\n# and (C) 2007-2017 David Beazley http://www.dabeaz.com/\n# Started: Feb 2017\n#\n# This C preprocessor was originally written by David Beazley and the\n# original can be found at https://github.com/dabeaz/ply/blob/master/ply/cpp.py\n# This edition substantially improves on standards conforming output,\n# getting quite close to what clang or GCC outputs.\n\nfrom __future__ import generators, print_function, absolute_import, division\n\nimport sys, re, os\n\nin_production = 1  # Set to 0 if editing pcpp implementation!\n\n# Some Python 3 compatibility shims\nif sys.version_info.major &lt; 3:\n    STRING_TYPES = (str, unicode)\nelse:\n    STRING_TYPES = str\n\n# -----------------------------------------------------------------------------\n# Default preprocessor lexer definitions.   These tokens are enough to get\n# a basic preprocessor working.   Other modules may import these if they want\n# -----------------------------------------------------------------------------\n\ntokens = (\n   &#39;CPP_ID&#39;,&#39;CPP_INTEGER&#39;, &#39;CPP_FLOAT&#39;, &#39;CPP_STRING&#39;, &#39;CPP_CHAR&#39;, &#39;CPP_WS&#39;, &#39;CPP_LINECONT&#39;, &#39;CPP_COMMENT1&#39;, &#39;CPP_COMMENT2&#39;,\n   &#39;CPP_POUND&#39;,&#39;CPP_DPOUND&#39;, &#39;CPP_PLUS&#39;, &#39;CPP_MINUS&#39;, &#39;CPP_STAR&#39;, &#39;CPP_FSLASH&#39;, &#39;CPP_PERCENT&#39;, &#39;CPP_BAR&#39;,\n   &#39;CPP_AMPERSAND&#39;, &#39;CPP_TILDE&#39;, &#39;CPP_HAT&#39;, &#39;CPP_LESS&#39;, &#39;CPP_GREATER&#39;, &#39;CPP_EQUAL&#39;, &#39;CPP_EXCLAMATION&#39;,\n   &#39;CPP_QUESTION&#39;, &#39;CPP_LPAREN&#39;, &#39;CPP_RPAREN&#39;, &#39;CPP_LBRACKET&#39;, &#39;CPP_RBRACKET&#39;, &#39;CPP_LCURLY&#39;, &#39;CPP_RCURLY&#39;,\n   &#39;CPP_DOT&#39;, &#39;CPP_COMMA&#39;, &#39;CPP_SEMICOLON&#39;, &#39;CPP_COLON&#39;, &#39;CPP_BSLASH&#39;, &#39;CPP_SQUOTE&#39;, &#39;CPP_DQUOTE&#39;,\n\n   &#39;CPP_DEREFERENCE&#39;, &#39;CPP_MINUSEQUAL&#39;, &#39;CPP_MINUSMINUS&#39;, &#39;CPP_LSHIFT&#39;, 
&#39;CPP_LESSEQUAL&#39;, &#39;CPP_RSHIFT&#39;,\n   &#39;CPP_GREATEREQUAL&#39;, &#39;CPP_LOGICALOR&#39;, &#39;CPP_OREQUAL&#39;, &#39;CPP_LOGICALAND&#39;, &#39;CPP_ANDEQUAL&#39;, &#39;CPP_EQUALITY&#39;,\n   &#39;CPP_INEQUALITY&#39;, &#39;CPP_XOREQUAL&#39;, &#39;CPP_MULTIPLYEQUAL&#39;, &#39;CPP_DIVIDEEQUAL&#39;, &#39;CPP_PLUSEQUAL&#39;, &#39;CPP_PLUSPLUS&#39;,\n   &#39;CPP_PERCENTEQUAL&#39;, &#39;CPP_LSHIFTEQUAL&#39;, &#39;CPP_RSHIFTEQUAL&#39;\n)\n\nliterals = &#34;+-*/%|&amp;~^&lt;&gt;=!?()[]{}.,;:\\\\\\&#39;\\&#34;&#34;\n\n# Whitespace, but don&#39;t match past the end of a line\ndef t_CPP_WS(t):\n    r&#39;([ \\t]+|\\n)&#39;\n    t.lexer.lineno += t.value.count(&#34;\\n&#34;)\n    return t\n\n# Line continuation, accept whitespace between the backslash and new line\ndef t_CPP_LINECONT(t):\n    r&#39;\\\\[ \\t]*\\n&#39;\n    t.value = t.value[1:-1]\n    t.lexer.lineno += 1\n    return t\n_string_literal_linecont_pat = re.compile(r&#39;\\\\[ \\t]*\\n&#39;)\n\nt_CPP_POUND = r&#39;\\#&#39;\nt_CPP_DPOUND = r&#39;\\#\\#&#39;\nt_CPP_PLUS = r&#39;\\+&#39;\nt_CPP_MINUS = r&#39;-&#39;\nt_CPP_STAR = r&#39;\\*&#39;\nt_CPP_FSLASH = r&#39;/&#39;\nt_CPP_PERCENT = r&#39;%&#39;\nt_CPP_BAR = r&#39;\\|&#39;\nt_CPP_AMPERSAND = r&#39;&amp;&#39;\nt_CPP_TILDE = r&#39;~&#39;\nt_CPP_HAT = r&#39;\\^&#39;\nt_CPP_LESS = r&#39;&lt;&#39;\nt_CPP_GREATER = r&#39;&gt;&#39;\nt_CPP_EQUAL = r&#39;=&#39;\nt_CPP_EXCLAMATION = r&#39;!&#39;\nt_CPP_QUESTION = r&#39;\\?&#39;\nt_CPP_LPAREN = r&#39;\\(&#39;\nt_CPP_RPAREN = r&#39;\\)&#39;\nt_CPP_LBRACKET = r&#39;\\[&#39;\nt_CPP_RBRACKET = r&#39;\\]&#39;\nt_CPP_LCURLY = r&#39;{&#39;\nt_CPP_RCURLY = r&#39;}&#39;\nt_CPP_DOT = r&#39;\\.&#39;\nt_CPP_COMMA = r&#39;,&#39;\nt_CPP_SEMICOLON = r&#39;;&#39;\nt_CPP_COLON = r&#39;:&#39;\nt_CPP_BSLASH = r&#39;\\\\&#39;\nt_CPP_SQUOTE = r&#34;&#39;&#34;\nt_CPP_DQUOTE = r&#39;&#34;&#39;\n\nt_CPP_DEREFERENCE = r&#39;-&gt;&#39;\nt_CPP_MINUSEQUAL = r&#39;-=&#39;\nt_CPP_MINUSMINUS = r&#39;--&#39;\nt_CPP_LSHIFT = 
r&#39;&lt;&lt;&#39;\nt_CPP_LESSEQUAL = r&#39;&lt;=&#39;\nt_CPP_RSHIFT = r&#39;&gt;&gt;&#39;\nt_CPP_GREATEREQUAL = r&#39;&gt;=&#39;\nt_CPP_LOGICALOR = r&#39;\\|\\|&#39;\nt_CPP_OREQUAL = r&#39;\\|=&#39;\nt_CPP_LOGICALAND = r&#39;&amp;&amp;&#39;\nt_CPP_ANDEQUAL = r&#39;&amp;=&#39;\nt_CPP_EQUALITY = r&#39;==&#39;\nt_CPP_INEQUALITY = r&#39;!=&#39;\nt_CPP_XOREQUAL = r&#39;^=&#39;\nt_CPP_MULTIPLYEQUAL = r&#39;\\*=&#39;\nt_CPP_DIVIDEEQUAL = r&#39;/=&#39;\nt_CPP_PLUSEQUAL = r&#39;\\+=&#39;\nt_CPP_PLUSPLUS = r&#39;\\+\\+&#39;\nt_CPP_PERCENTEQUAL = r&#39;%=&#39;\nt_CPP_LSHIFTEQUAL = r&#39;&lt;&lt;=&#39;\nt_CPP_RSHIFTEQUAL = r&#39;&gt;&gt;=&#39;\n\n\n# Identifier\nt_CPP_ID = r&#39;[A-Za-z_][\\w_]*&#39;\n\n# Integer literal\ndef CPP_INTEGER(t):\n    r&#39;(((((0x)|(0X))[0-9a-fA-F]+)|(\\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)&#39;\n    return t\n\nt_CPP_INTEGER = CPP_INTEGER\n\n# Floating literal\nt_CPP_FLOAT = r&#39;((\\d+)(\\.\\d+)(e(\\+|-)?(\\d+))?|(\\d+)e(\\+|-)?(\\d+))([lL]|[fF])?&#39;\n\n# String literal\ndef t_CPP_STRING(t):\n    r&#39;\\&#34;([^\\\\\\n]|(\\\\(.|\\n)))*?\\&#34;&#39;\n    t.value, subs_made = _string_literal_linecont_pat.subn(&#39;&#39;, t.value)\n    t.lexer.lineno += subs_made + t.value.count(&#34;\\n&#34;)\n    return t\n\n# Character constant &#39;c&#39; or L&#39;c&#39;\ndef t_CPP_CHAR(t):\n    r&#39;(L)?\\&#39;([^\\\\\\n]|(\\\\(.|\\n)))*?\\&#39;&#39;\n    t.lexer.lineno += t.value.count(&#34;\\n&#34;)\n    return t\n\n# Comment\ndef t_CPP_COMMENT1(t):\n    r&#39;(/\\*(.|\\n)*?\\*/)&#39;\n    ncr = t.value.count(&#34;\\n&#34;)\n    t.lexer.lineno += ncr\n    return t\n\n# Line comment\ndef t_CPP_COMMENT2(t):\n    r&#39;(//[^\\n]*)&#39;\n    return t\n    \ndef t_error(t):\n    t.type = t.value[0]\n    t.value = t.value[0]\n    t.lexer.skip(1)\n    return t\n\n\n# Python 2/3 compatible way of importing a subpackage\noldsyspath = sys.path\nsys.path = [ os.path.join( os.path.dirname( os.path.abspath(__file__) ), &#34;ply&#34; ) ] + sys.path\nfrom ply import 
lex, yacc\nfrom ply.lex import LexToken\nsys.path = oldsyspath\ndel oldsyspath\n\n# -----------------------------------------------------------------------------\n# trigraph()\n# \n# Given an input string, this function replaces all trigraph sequences. \n# The following mapping is used:\n#\n#     ??=    #\n#     ??/    \\\n#     ??&#39;    ^\n#     ??(    [\n#     ??)    ]\n#     ??!    |\n#     ??&lt;    {\n#     ??&gt;    }\n#     ??-    ~\n# -----------------------------------------------------------------------------\n\n_trigraph_pat = re.compile(r&#39;&#39;&#39;\\?\\?[=/\\&#39;\\(\\)\\!&lt;&gt;\\-]&#39;&#39;&#39;)\n_trigraph_rep = {\n    &#39;=&#39;:&#39;#&#39;,\n    &#39;/&#39;:&#39;\\\\&#39;,\n    &#34;&#39;&#34;:&#39;^&#39;,\n    &#39;(&#39;:&#39;[&#39;,\n    &#39;)&#39;:&#39;]&#39;,\n    &#39;!&#39;:&#39;|&#39;,\n    &#39;&lt;&#39;:&#39;{&#39;,\n    &#39;&gt;&#39;:&#39;}&#39;,\n    &#39;-&#39;:&#39;~&#39;\n}\n\ndef trigraph(input):\n    return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)\n\ndef default_lexer():\n    return lex.lex(optimize=in_production)\n\n# ------------------------------------------------------------------\n# Macro object\n#\n# This object holds information about preprocessor macros\n#\n#    .name      - Macro name (string)\n#    .value     - Macro value (a list of tokens)\n#    .arglist   - List of argument names\n#    .variadic  - Boolean indicating whether or not variadic macro\n#    .vararg    - Name of the variadic parameter\n#\n# When a macro is created, the macro replacement token sequence is\n# pre-scanned and used to create patch lists that are later used\n# during macro expansion\n# ------------------------------------------------------------------\n\nclass Macro(object):\n    def __init__(self,name,value,arglist=None,variadic=False):\n        self.name = name\n        self.value = value\n        self.arglist = arglist\n        self.variadic = variadic\n        if variadic:\n            self.vararg = 
arglist[-1]\n        self.source = None\n        self.lineno = None\n    def __repr__(self):\n        return &#34;%s(%s)=%s&#34; % (self.name, self.arglist, self.value)\n\n# ------------------------------------------------------------------\n# Preprocessor event hooks\n#\n# Override these to customise preprocessing\n# ------------------------------------------------------------------\n\nclass Action(object):\n    &#34;&#34;&#34;What kind of abort processing to do in OutputDirective&#34;&#34;&#34;\n    IgnoreAndPassThrough = 0\n    &#34;&#34;&#34;Abort processing (don&#39;t execute), but pass the directive through to output&#34;&#34;&#34;\n    IgnoreAndRemove = 1\n    &#34;&#34;&#34;Abort processing (don&#39;t execute), and remove from output&#34;&#34;&#34;\n\nclass OutputDirective(Exception):\n    &#34;&#34;&#34;Raise this exception to abort processing of a preprocessor directive and\n    to instead output it as is into the output&#34;&#34;&#34;\n    def __init__(self, action):\n        self.action = action\n\nclass PreprocessorHooks(object):\n    &#34;&#34;&#34;Override these in your subclass of Preprocessor to customise preprocessing&#34;&#34;&#34;\n    def __init__(self):\n        self.lastdirective = None\n\n    def on_error(self,file,line,msg):\n        &#34;&#34;&#34;Called when the preprocessor has encountered an error, e.g. malformed input.\n        \n        The default simply prints to stderr and increments the return code.\n        &#34;&#34;&#34;\n        print(&#34;%s:%d error: %s&#34; % (file,line,msg), file = sys.stderr)\n        self.return_code += 1\n        \n    def on_file_open(self,is_system_include,includepath):\n        &#34;&#34;&#34;Called to open a file for reading.\n        \n        This hook provides the ability to use ``chardet``, or any other mechanism,\n        to inspect a file for its text encoding, and open it appropriately. 
Be\n        aware that this function is used to probe for possible include file locations,\n        so ``includepath`` may not exist. If it does not, raise the appropriate\n        ``IOError`` exception.\n        \n        The default calls ``io.open(includepath, &#39;r&#39;, encoding = self.assume_encoding)``,\n        examines if it starts with a BOM (if so, it removes it), and returns the file\n        object opened. This raises the appropriate exception if the path was not found.\n        &#34;&#34;&#34;\n        if sys.version_info.major &lt; 3:\n            assert self.assume_encoding is None\n            ret = open(includepath, &#39;r&#39;)\n        else:\n            ret = open(includepath, &#39;r&#39;, encoding = self.assume_encoding)\n        bom = ret.read(1)\n        #print(repr(bom))\n        if bom != &#39;\\ufeff&#39;:\n            ret.seek(0)\n        return ret\n\n    def on_include_not_found(self,is_malformed,is_system_include,curdir,includepath):\n        &#34;&#34;&#34;Called when a #include wasn&#39;t found.\n        \n        Raise OutputDirective to pass through or remove, else return\n        a suitable path. 
Remember that Preprocessor.add_path() lets you add search paths.\n        \n        The default calls ``self.on_error()`` with a suitable error message about the\n        include file not found if ``is_malformed`` is False, else a suitable error\n        message about a malformed #include, and in both cases raises OutputDirective\n        (pass through).\n        &#34;&#34;&#34;\n        if is_malformed:\n            self.on_error(self.lastdirective.source,self.lastdirective.lineno, &#34;Malformed #include statement: %s&#34; % includepath)\n        else:\n            self.on_error(self.lastdirective.source,self.lastdirective.lineno, &#34;Include file &#39;%s&#39; not found&#34; % includepath)\n        raise OutputDirective(Action.IgnoreAndPassThrough)\n        \n    def on_unknown_macro_in_defined_expr(self,tok):\n        &#34;&#34;&#34;Called when an expression passed to an #if contained a defined operator\n        performed on something unknown.\n        \n        Return True if to treat it as defined, False if to treat it as undefined,\n        raise OutputDirective to pass through without execution, or return None to\n        pass through the mostly expanded #if expression apart from the unknown defined.\n        \n        The default returns False, as per the C standard.\n        &#34;&#34;&#34;\n        return False\n\n    def on_unknown_macro_in_expr(self,ident):\n        &#34;&#34;&#34;Called when an expression passed to an #if contained an unknown identifier.\n        \n        Return what value the expression evaluator ought to use, or return None to\n        pass through the mostly expanded #if expression.\n        \n        The default returns an integer 0, as per the C standard.\n        &#34;&#34;&#34;\n        return 0\n    \n    def on_unknown_macro_function_in_expr(self,ident):\n        &#34;&#34;&#34;Called when an expression passed to an #if contained an unknown function.\n        \n        Return a callable which will be invoked by the 
expression evaluator to\n        evaluate the input to the function, or return None to pass through the\n        mostly expanded #if expression.\n        \n        The default returns a lambda which returns integer 0, as per the C standard.\n        &#34;&#34;&#34;\n        return lambda x : 0\n    \n    def on_directive_handle(self,directive,toks,ifpassthru,precedingtoks):\n        &#34;&#34;&#34;Called when there is one of\n        \n        define, include, undef, ifdef, ifndef, if, elif, else, endif\n        \n        Return True to execute and remove from the output, raise OutputDirective\n        to pass through or remove without execution, or return None to execute\n        AND pass through to the output (this only works for #define, #undef).\n        \n        The default returns True (execute and remove from the output).\n\n        directive is the directive, toks is the tokens after the directive,\n        ifpassthru is whether we are in passthru mode, precedingtoks is the\n        tokens preceding the directive from the # token until the directive.\n        &#34;&#34;&#34;\n        self.lastdirective = directive\n        return True\n        \n    def on_directive_unknown(self,directive,toks,ifpassthru,precedingtoks):\n        &#34;&#34;&#34;Called when the preprocessor encounters a #directive it doesn&#39;t understand.\n        This is actually quite an extensive list as it currently only understands:\n        \n        define, include, undef, ifdef, ifndef, if, elif, else, endif\n        \n        Return True to remove from the output, raise OutputDirective\n        to pass through or remove, or return None to\n        pass through into the output.\n        \n        The default handles #error and #warning by printing to stderr and returning True\n        (remove from output). 
For everything else it returns None (pass through into output).\n\n        directive is the directive, toks is the tokens after the directive,\n        ifpassthru is whether we are in passthru mode, precedingtoks is the\n        tokens preceding the directive from the # token until the directive.\n        &#34;&#34;&#34;\n        if directive.value == &#39;error&#39;:\n            print(&#34;%s:%d error: %s&#34; % (directive.source,directive.lineno,&#39;&#39;.join(tok.value for tok in toks)), file = sys.stderr)\n            self.return_code += 1\n            return True\n        elif directive.value == &#39;warning&#39;:\n            print(&#34;%s:%d warning: %s&#34; % (directive.source,directive.lineno,&#39;&#39;.join(tok.value for tok in toks)), file = sys.stderr)\n            return True\n        return None\n        \n    def on_potential_include_guard(self,macro):\n        &#34;&#34;&#34;Called when the preprocessor encounters an #ifndef macro or an #if !defined(macro)\n        as the first non-whitespace thing in a file. Unlike the other hooks, macro is a string,\n        not a token.\n        &#34;&#34;&#34;\n        pass\n    \n    def on_comment(self,tok):\n        &#34;&#34;&#34;Called when the preprocessor encounters a comment token. You can modify the token\n        in place. 
You must return True to let the comment pass through, else it will be removed.\n        \n        Returning False or None modifies the token to become whitespace, becoming a single space\n        if the comment is a block comment, else a single new line if the comment is a line comment.\n        &#34;&#34;&#34;\n        return None</code></pre>\n                </details>\n            </section>\n            <section>\n            </section>\n            <section>\n            </section>\n            <section>\n                <h2 class=\"section-title\" id=\"header-functions\">Functions</h2>\n                <dl>\n                    <dt id=\"pcpp.parser.CPP_INTEGER\"><code class=\"name flex\">\n<span>def <span class=\"ident\">CPP_INTEGER</span></span>(<span>t)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <p>(((((0x)|(0X))[0-9a-fA-F]+)|(\\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)</p>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def CPP_INTEGER(t):\n    r&#39;(((((0x)|(0X))[0-9a-fA-F]+)|(\\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)&#39;\n    return t</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.parser.default_lexer\"><code class=\"name flex\">\n<span>def <span class=\"ident\">default_lexer</span></span>(<span>)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\"></section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def default_lexer():\n    return lex.lex(optimize=in_production)</code></pre>\n                        </details>\n                    </dd>\n                    <dt 
id=\"pcpp.parser.t_CPP_CHAR\"><code class=\"name flex\">\n<span>def <span class=\"ident\">t_CPP_CHAR</span></span>(<span>t)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <p>(L)?'([^\\\\n]|(\\(.|\\n)))*?'</p>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def t_CPP_CHAR(t):\n    r&#39;(L)?\\&#39;([^\\\\\\n]|(\\\\(.|\\n)))*?\\&#39;&#39;\n    t.lexer.lineno += t.value.count(&#34;\\n&#34;)\n    return t</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.parser.t_CPP_COMMENT1\"><code class=\"name flex\">\n<span>def <span class=\"ident\">t_CPP_COMMENT1</span></span>(<span>t)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <p>(/*(.|\\n)*?*/)</p>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def t_CPP_COMMENT1(t):\n    r&#39;(/\\*(.|\\n)*?\\*/)&#39;\n    ncr = t.value.count(&#34;\\n&#34;)\n    t.lexer.lineno += ncr\n    return t</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.parser.t_CPP_COMMENT2\"><code class=\"name flex\">\n<span>def <span class=\"ident\">t_CPP_COMMENT2</span></span>(<span>t)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <p>(//[^\\n]*)</p>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def t_CPP_COMMENT2(t):\n    
r&#39;(//[^\\n]*)&#39;\n    return t</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.parser.t_CPP_INTEGER\"><code class=\"name flex\">\n<span>def <span class=\"ident\">t_CPP_INTEGER</span></span>(<span>t)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <p>(((((0x)|(0X))[0-9a-fA-F]+)|(\\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)</p>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def CPP_INTEGER(t):\n    r&#39;(((((0x)|(0X))[0-9a-fA-F]+)|(\\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)&#39;\n    return t</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.parser.t_CPP_LINECONT\"><code class=\"name flex\">\n<span>def <span class=\"ident\">t_CPP_LINECONT</span></span>(<span>t)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <p>\\[ \\t]*\\n</p>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def t_CPP_LINECONT(t):\n    r&#39;\\\\[ \\t]*\\n&#39;\n    t.value = t.value[1:-1]\n    t.lexer.lineno += 1\n    return t</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.parser.t_CPP_STRING\"><code class=\"name flex\">\n<span>def <span class=\"ident\">t_CPP_STRING</span></span>(<span>t)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <p>\"([^\\\\n]|(\\(.|\\n)))*?\"</p>\n                        </section>\n                        <details class=\"source\">\n                        
    <summary>Source code</summary>\n                            <pre><code class=\"python\">def t_CPP_STRING(t):\n    r&#39;\\&#34;([^\\\\\\n]|(\\\\(.|\\n)))*?\\&#34;&#39;\n    t.value, subs_made = _string_literal_linecont_pat.subn(&#39;&#39;, t.value)\n    t.lexer.lineno += subs_made + t.value.count(&#34;\\n&#34;)\n    return t</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.parser.t_CPP_WS\"><code class=\"name flex\">\n<span>def <span class=\"ident\">t_CPP_WS</span></span>(<span>t)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <p>([ \\t]+|\\n)</p>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def t_CPP_WS(t):\n    r&#39;([ \\t]+|\\n)&#39;\n    t.lexer.lineno += t.value.count(&#34;\\n&#34;)\n    return t</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.parser.t_error\"><code class=\"name flex\">\n<span>def <span class=\"ident\">t_error</span></span>(<span>t)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\"></section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">def t_error(t):\n    t.type = t.value[0]\n    t.value = t.value[0]\n    t.lexer.skip(1)\n    return t</code></pre>\n                        </details>\n                    </dd>\n                    <dt id=\"pcpp.parser.trigraph\"><code class=\"name flex\">\n<span>def <span class=\"ident\">trigraph</span></span>(<span>input)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\"></section>\n                        <details class=\"source\">\n 
                           <summary>Source code</summary>\n                            <pre><code class=\"python\">def trigraph(input):\n    return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)</code></pre>\n                        </details>\n                    </dd>\n                </dl>\n            </section>\n            <section>\n                <h2 class=\"section-title\" id=\"header-classes\">Classes</h2>\n                <dl>\n                    <dt id=\"pcpp.parser.Action\"><code class=\"flex name class\">\n<span>class <span class=\"ident\">Action</span></span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <p>What kind of abort processing to do in OutputDirective</p>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">class Action(object):\n    &#34;&#34;&#34;What kind of abort processing to do in OutputDirective&#34;&#34;&#34;\n    IgnoreAndPassThrough = 0\n    &#34;&#34;&#34;Abort processing (don&#39;t execute), but pass the directive through to output&#34;&#34;&#34;\n    IgnoreAndRemove = 1\n    &#34;&#34;&#34;Abort processing (don&#39;t execute), and remove from output&#34;&#34;&#34;</code></pre>\n                        </details>\n                        <h3>Class variables</h3>\n                        <dl>\n                            <dt id=\"pcpp.parser.Action.IgnoreAndPassThrough\"><code\n                                    class=\"name\">var <span class=\"ident\">IgnoreAndPassThrough</span></code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Abort processing (don't execute), but pass the directive through to output</p>\n                                </section>\n                            </dd>\n             
               <dt id=\"pcpp.parser.Action.IgnoreAndRemove\"><code\n                                    class=\"name\">var <span class=\"ident\">IgnoreAndRemove</span></code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Abort processing (don't execute), and remove from output</p>\n                                </section>\n                            </dd>\n                        </dl>\n                    </dd>\n                    <dt id=\"pcpp.parser.Macro\"><code class=\"flex name class\">\n<span>class <span class=\"ident\">Macro</span></span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\"></section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">class Macro(object):\n    def __init__(self,name,value,arglist=None,variadic=False):\n        self.name = name\n        self.value = value\n        self.arglist = arglist\n        self.variadic = variadic\n        if variadic:\n            self.vararg = arglist[-1]\n        self.source = None\n        self.lineno = None\n    def __repr__(self):\n        return &#34;%s(%s)=%s&#34; % (self.name, self.arglist, self.value)</code></pre>\n                        </details>\n                        <h3>Methods</h3>\n                        <dl>\n                            <dt id=\"pcpp.parser.Macro.__init__\"><code class=\"name flex\">\n<span>def <span class=\"ident\">__init__</span></span>(<span>self, name, value, arglist=None, variadic=False)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Initialize self.\n                                        See help(type(self)) for accurate signature.</p>\n                                </section>\n                             
   <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def __init__(self,name,value,arglist=None,variadic=False):\n    self.name = name\n    self.value = value\n    self.arglist = arglist\n    self.variadic = variadic\n    if variadic:\n        self.vararg = arglist[-1]\n    self.source = None\n    self.lineno = None</code></pre>\n                                </details>\n                            </dd>\n                        </dl>\n                    </dd>\n                    <dt id=\"pcpp.parser.OutputDirective\"><code class=\"flex name class\">\n<span>class <span class=\"ident\">OutputDirective</span></span>\n<span>(</span><span><small>ancestors:</small> builtins.Exception, builtins.BaseException)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <p>Raise this exception to abort processing of a preprocessor directive and\n                                to instead output it as is into the output</p>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">class OutputDirective(Exception):\n    &#34;&#34;&#34;Raise this exception to abort processing of a preprocessor directive and\n    to instead output it as is into the output&#34;&#34;&#34;\n    def __init__(self, action):\n        self.action = action</code></pre>\n                        </details>\n                        <h3>Methods</h3>\n                        <dl>\n                            <dt id=\"pcpp.parser.OutputDirective.__init__\"><code class=\"name flex\">\n<span>def <span class=\"ident\">__init__</span></span>(<span>self, action)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                   
                 <p>Initialize self.\n                                        See help(type(self)) for accurate signature.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def __init__(self, action):\n    self.action = action</code></pre>\n                                </details>\n                            </dd>\n                        </dl>\n                    </dd>\n                    <dt id=\"pcpp.parser.PreprocessorHooks\"><code class=\"flex name class\">\n<span>class <span class=\"ident\">PreprocessorHooks</span></span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <p>Override these in your subclass of Preprocessor to customise preprocessing</p>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">class PreprocessorHooks(object):\n    &#34;&#34;&#34;Override these in your subclass of Preprocessor to customise preprocessing&#34;&#34;&#34;\n    def __init__(self):\n        self.lastdirective = None\n\n    def on_error(self,file,line,msg):\n        &#34;&#34;&#34;Called when the preprocessor has encountered an error, e.g. 
malformed input.\n        \n        The default simply prints to stderr and increments the return code.\n        &#34;&#34;&#34;\n        print(&#34;%s:%d error: %s&#34; % (file,line,msg), file = sys.stderr)\n        self.return_code += 1\n        \n    def on_file_open(self,is_system_include,includepath):\n        &#34;&#34;&#34;Called to open a file for reading.\n        \n        This hook provides the ability to use ``chardet``, or any other mechanism,\n        to inspect a file for its text encoding, and open it appropriately. Be\n        aware that this function is used to probe for possible include file locations,\n        so ``includepath`` may not exist. If it does not, raise the appropriate\n        ``IOError`` exception.\n        \n        The default calls ``io.open(includepath, &#39;r&#39;, encoding = self.assume_encoding)``,\n        examines if it starts with a BOM (if so, it removes it), and returns the file\n        object opened. This raises the appropriate exception if the path was not found.\n        &#34;&#34;&#34;\n        if sys.version_info.major &lt; 3:\n            assert self.assume_encoding is None\n            ret = open(includepath, &#39;r&#39;)\n        else:\n            ret = open(includepath, &#39;r&#39;, encoding = self.assume_encoding)\n        bom = ret.read(1)\n        #print(repr(bom))\n        if bom != &#39;\\ufeff&#39;:\n            ret.seek(0)\n        return ret\n\n    def on_include_not_found(self,is_malformed,is_system_include,curdir,includepath):\n        &#34;&#34;&#34;Called when a #include wasn&#39;t found.\n        \n        Raise OutputDirective to pass through or remove, else return\n        a suitable path. 
Remember that Preprocessor.add_path() lets you add search paths.\n        \n        The default calls ``self.on_error()`` with a suitable error message about the\n        include file not found if ``is_malformed`` is False, else a suitable error\n        message about a malformed #include, and in both cases raises OutputDirective\n        (pass through).\n        &#34;&#34;&#34;\n        if is_malformed:\n            self.on_error(self.lastdirective.source,self.lastdirective.lineno, &#34;Malformed #include statement: %s&#34; % includepath)\n        else:\n            self.on_error(self.lastdirective.source,self.lastdirective.lineno, &#34;Include file &#39;%s&#39; not found&#34; % includepath)\n        raise OutputDirective(Action.IgnoreAndPassThrough)\n        \n    def on_unknown_macro_in_defined_expr(self,tok):\n        &#34;&#34;&#34;Called when an expression passed to an #if contained a defined operator\n        performed on something unknown.\n        \n        Return True if to treat it as defined, False if to treat it as undefined,\n        raise OutputDirective to pass through without execution, or return None to\n        pass through the mostly expanded #if expression apart from the unknown defined.\n        \n        The default returns False, as per the C standard.\n        &#34;&#34;&#34;\n        return False\n\n    def on_unknown_macro_in_expr(self,ident):\n        &#34;&#34;&#34;Called when an expression passed to an #if contained an unknown identifier.\n        \n        Return what value the expression evaluator ought to use, or return None to\n        pass through the mostly expanded #if expression.\n        \n        The default returns an integer 0, as per the C standard.\n        &#34;&#34;&#34;\n        return 0\n    \n    def on_unknown_macro_function_in_expr(self,ident):\n        &#34;&#34;&#34;Called when an expression passed to an #if contained an unknown function.\n        \n        Return a callable which will be invoked by the 
expression evaluator to\n        evaluate the input to the function, or return None to pass through the\n        mostly expanded #if expression.\n        \n        The default returns a lambda which returns integer 0, as per the C standard.\n        &#34;&#34;&#34;\n        return lambda x : 0\n    \n    def on_directive_handle(self,directive,toks,ifpassthru,precedingtoks):\n        &#34;&#34;&#34;Called when there is one of\n        \n        define, include, undef, ifdef, ifndef, if, elif, else, endif\n        \n        Return True to execute and remove from the output, raise OutputDirective\n        to pass through or remove without execution, or return None to execute\n        AND pass through to the output (this only works for #define, #undef).\n        \n        The default returns True (execute and remove from the output).\n\n        directive is the directive, toks is the tokens after the directive,\n        ifpassthru is whether we are in passthru mode, precedingtoks is the\n        tokens preceding the directive from the # token until the directive.\n        &#34;&#34;&#34;\n        self.lastdirective = directive\n        return True\n        \n    def on_directive_unknown(self,directive,toks,ifpassthru,precedingtoks):\n        &#34;&#34;&#34;Called when the preprocessor encounters a #directive it doesn&#39;t understand.\n        This is actually quite an extensive list as it currently only understands:\n        \n        define, include, undef, ifdef, ifndef, if, elif, else, endif\n        \n        Return True to remove from the output, raise OutputDirective\n        to pass through or remove, or return None to\n        pass through into the output.\n        \n        The default handles #error and #warning by printing to stderr and returning True\n        (remove from output). 
For everything else it returns None (pass through into output).\n\n        directive is the directive, toks is the tokens after the directive,\n        ifpassthru is whether we are in passthru mode, precedingtoks is the\n        tokens preceding the directive from the # token until the directive.\n        &#34;&#34;&#34;\n        if directive.value == &#39;error&#39;:\n            print(&#34;%s:%d error: %s&#34; % (directive.source,directive.lineno,&#39;&#39;.join(tok.value for tok in toks)), file = sys.stderr)\n            self.return_code += 1\n            return True\n        elif directive.value == &#39;warning&#39;:\n            print(&#34;%s:%d warning: %s&#34; % (directive.source,directive.lineno,&#39;&#39;.join(tok.value for tok in toks)), file = sys.stderr)\n            return True\n        return None\n        \n    def on_potential_include_guard(self,macro):\n        &#34;&#34;&#34;Called when the preprocessor encounters an #ifndef macro or an #if !defined(macro)\n        as the first non-whitespace thing in a file. Unlike the other hooks, macro is a string,\n        not a token.\n        &#34;&#34;&#34;\n        pass\n    \n    def on_comment(self,tok):\n        &#34;&#34;&#34;Called when the preprocessor encounters a comment token. You can modify the token\n        in place. 
You must return True to let the comment pass through, else it will be removed.\n        \n        Returning False or None modifies the token to become whitespace, becoming a single space\n        if the comment is a block comment, else a single new line if the comment is a line comment.\n        &#34;&#34;&#34;\n        return None</code></pre>\n                        </details>\n                        <h3>Subclasses</h3>\n                        <ul class=\"hlist\">\n                            <li><a title=\"pcpp.preprocessor.Preprocessor\"\n                                    href=\"preprocessor.html#pcpp.preprocessor.Preprocessor\">Preprocessor</a></li>\n                        </ul>\n                        <h3>Methods</h3>\n                        <dl>\n                            <dt id=\"pcpp.parser.PreprocessorHooks.__init__\"><code class=\"name flex\">\n<span>def <span class=\"ident\">__init__</span></span>(<span>self)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Initialize self.\n                                        See help(type(self)) for accurate signature.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def __init__(self):\n    self.lastdirective = None</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.parser.PreprocessorHooks.on_comment\"><code class=\"name flex\">\n<span>def <span class=\"ident\">on_comment</span></span>(<span>self, tok)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called when the preprocessor encounters a comment token. 
You can modify the token\n                                        in place. You must return True to let the comment pass through, else it will be\n                                        removed.</p>\n                                    <p>Returning False or None modifies the token to become whitespace, becoming a\n                                        single space\n                                        if the comment is a block comment, else a single new line if the comment is a\n                                        line comment.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_comment(self,tok):\n    &#34;&#34;&#34;Called when the preprocessor encounters a comment token. You can modify the token\n    in place. You must return True to let the comment pass through, else it will be removed.\n    \n    Returning False or None modifies the token to become whitespace, becoming a single space\n    if the comment is a block comment, else a single new line if the comment is a line comment.\n    &#34;&#34;&#34;\n    return None</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.parser.PreprocessorHooks.on_directive_handle\"><code class=\"name flex\">\n<span>def <span class=\"ident\">on_directive_handle</span></span>(<span>self, directive, toks, ifpassthru, precedingtoks)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called when there is one of</p>\n                                    <p>define, include, undef, ifdef, ifndef, if, elif, else, endif</p>\n                                    <p>Return True to execute and remove from the output, raise OutputDirective\n                      
                  to pass through or remove without execution, or return None to execute\n                                        AND pass through to the output (this only works for #define, #undef).</p>\n                                    <p>The default returns True (execute and remove from the output).</p>\n                                    <p>directive is the directive, toks is the tokens after the directive,\n                                        ifpassthru is whether we are in passthru mode, precedingtoks is the\n                                        tokens preceding the directive from the # token until the directive.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_directive_handle(self,directive,toks,ifpassthru,precedingtoks):\n    &#34;&#34;&#34;Called when there is one of\n    \n    define, include, undef, ifdef, ifndef, if, elif, else, endif\n    \n    Return True to execute and remove from the output, raise OutputDirective\n    to pass through or remove without execution, or return None to execute\n    AND pass through to the output (this only works for #define, #undef).\n    \n    The default returns True (execute and remove from the output).\n\n    directive is the directive, toks is the tokens after the directive,\n    ifpassthru is whether we are in passthru mode, precedingtoks is the\n    tokens preceding the directive from the # token until the directive.\n    &#34;&#34;&#34;\n    self.lastdirective = directive\n    return True</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.parser.PreprocessorHooks.on_directive_unknown\"><code class=\"name flex\">\n<span>def <span class=\"ident\">on_directive_unknown</span></span>(<span>self, directive, toks, ifpassthru, 
precedingtoks)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called when the preprocessor encounters a #directive it doesn't understand.\n                                        This is actually quite an extensive list as it currently only understands:</p>\n                                    <p>define, include, undef, ifdef, ifndef, if, elif, else, endif</p>\n                                    <p>Return True to remove from the output, raise OutputDirective\n                                        to pass through or remove, or return None to\n                                        pass through into the output.</p>\n                                    <p>The default handles #error and #warning by printing to stderr and returning True\n                                        (remove from output). For everything else it returns None (pass through into\n                                        output).</p>\n                                    <p>directive is the directive, toks is the tokens after the directive,\n                                        ifpassthru is whether we are in passthru mode, precedingtoks is the\n                                        tokens preceding the directive from the # token until the directive.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_directive_unknown(self,directive,toks,ifpassthru,precedingtoks):\n    &#34;&#34;&#34;Called when the preprocessor encounters a #directive it doesn&#39;t understand.\n    This is actually quite an extensive list as it currently only understands:\n    \n    define, include, undef, ifdef, ifndef, if, elif, else, endif\n    \n    Return True to remove from the output, raise OutputDirective\n    to 
pass through or remove, or return None to\n    pass through into the output.\n    \n    The default handles #error and #warning by printing to stderr and returning True\n    (remove from output). For everything else it returns None (pass through into output).\n\n    directive is the directive, toks is the tokens after the directive,\n    ifpassthru is whether we are in passthru mode, precedingtoks is the\n    tokens preceding the directive from the # token until the directive.\n    &#34;&#34;&#34;\n    if directive.value == &#39;error&#39;:\n        print(&#34;%s:%d error: %s&#34; % (directive.source,directive.lineno,&#39;&#39;.join(tok.value for tok in toks)), file = sys.stderr)\n        self.return_code += 1\n        return True\n    elif directive.value == &#39;warning&#39;:\n        print(&#34;%s:%d warning: %s&#34; % (directive.source,directive.lineno,&#39;&#39;.join(tok.value for tok in toks)), file = sys.stderr)\n        return True\n    return None</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.parser.PreprocessorHooks.on_error\"><code class=\"name flex\">\n<span>def <span class=\"ident\">on_error</span></span>(<span>self, file, line, msg)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called when the preprocessor has encountered an error, e.g. malformed input.</p>\n                                    <p>The default simply prints to stderr and increments the return code.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_error(self,file,line,msg):\n    &#34;&#34;&#34;Called when the preprocessor has encountered an error, e.g. 
malformed input.\n    \n    The default simply prints to stderr and increments the return code.\n    &#34;&#34;&#34;\n    print(&#34;%s:%d error: %s&#34; % (file,line,msg), file = sys.stderr)\n    self.return_code += 1</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.parser.PreprocessorHooks.on_file_open\"><code class=\"name flex\">\n<span>def <span class=\"ident\">on_file_open</span></span>(<span>self, is_system_include, includepath)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called to open a file for reading.</p>\n                                    <p>This hook provides the ability to use <code>chardet</code>, or any other\n                                        mechanism,\n                                        to inspect a file for its text encoding, and open it appropriately. Be\n                                        aware that this function is used to probe for possible include file locations,\n                                        so <code>includepath</code> may not exist. If it does not, raise the appropriate\n                                        <code>IOError</code> exception.\n                                    </p>\n                                    <p>The default calls\n                                        <code>io.open(includepath, 'r', encoding = self.assume_encoding)</code>,\n                                        examines if it starts with a BOM (if so, it removes it), and returns the file\n                                        object opened. 
This raises the appropriate exception if the path was not found.\n                                    </p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_file_open(self,is_system_include,includepath):\n    &#34;&#34;&#34;Called to open a file for reading.\n    \n    This hook provides the ability to use ``chardet``, or any other mechanism,\n    to inspect a file for its text encoding, and open it appropriately. Be\n    aware that this function is used to probe for possible include file locations,\n    so ``includepath`` may not exist. If it does not, raise the appropriate\n    ``IOError`` exception.\n    \n    The default calls ``io.open(includepath, &#39;r&#39;, encoding = self.assume_encoding)``,\n    examines if it starts with a BOM (if so, it removes it), and returns the file\n    object opened. 
This raises the appropriate exception if the path was not found.\n    &#34;&#34;&#34;\n    if sys.version_info.major &lt; 3:\n        assert self.assume_encoding is None\n        ret = open(includepath, &#39;r&#39;)\n    else:\n        ret = open(includepath, &#39;r&#39;, encoding = self.assume_encoding)\n    bom = ret.read(1)\n    #print(repr(bom))\n    if bom != &#39;\\ufeff&#39;:\n        ret.seek(0)\n    return ret</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.parser.PreprocessorHooks.on_include_not_found\"><code class=\"name flex\">\n<span>def <span class=\"ident\">on_include_not_found</span></span>(<span>self, is_malformed, is_system_include, curdir, includepath)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called when a #include wasn't found.</p>\n                                    <p>Raise OutputDirective to pass through or remove, else return\n                                        a suitable path. 
Remember that Preprocessor.add_path() lets you add search\n                                        paths.</p>\n                                    <p>The default calls <code>self.on_error()</code> with a suitable error message\n                                        about the\n                                        include file not found if <code>is_malformed</code> is False, else a suitable\n                                        error\n                                        message about a malformed #include, and in both cases raises OutputDirective\n                                        (pass through).</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_include_not_found(self,is_malformed,is_system_include,curdir,includepath):\n    &#34;&#34;&#34;Called when a #include wasn&#39;t found.\n    \n    Raise OutputDirective to pass through or remove, else return\n    a suitable path. 
Remember that Preprocessor.add_path() lets you add search paths.\n    \n    The default calls ``self.on_error()`` with a suitable error message about the\n    include file not found if ``is_malformed`` is False, else a suitable error\n    message about a malformed #include, and in both cases raises OutputDirective\n    (pass through).\n    &#34;&#34;&#34;\n    if is_malformed:\n        self.on_error(self.lastdirective.source,self.lastdirective.lineno, &#34;Malformed #include statement: %s&#34; % includepath)\n    else:\n        self.on_error(self.lastdirective.source,self.lastdirective.lineno, &#34;Include file &#39;%s&#39; not found&#34; % includepath)\n    raise OutputDirective(Action.IgnoreAndPassThrough)</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.parser.PreprocessorHooks.on_potential_include_guard\"><code class=\"name flex\">\n<span>def <span class=\"ident\">on_potential_include_guard</span></span>(<span>self, macro)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called when the preprocessor encounters an #ifndef macro or an #if\n                                        !defined(macro)\n                                        as the first non-whitespace thing in a file. Unlike the other hooks, macro is a\n                                        string,\n                                        not a token.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_potential_include_guard(self,macro):\n    &#34;&#34;&#34;Called when the preprocessor encounters an #ifndef macro or an #if !defined(macro)\n    as the first non-whitespace thing in a file. 
Unlike the other hooks, macro is a string,\n    not a token.\n    &#34;&#34;&#34;\n    pass</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.parser.PreprocessorHooks.on_unknown_macro_function_in_expr\"><code\n                                    class=\"name flex\">\n<span>def <span class=\"ident\">on_unknown_macro_function_in_expr</span></span>(<span>self, ident)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called when an expression passed to an #if contained an unknown function.</p>\n                                    <p>Return a callable which will be invoked by the expression evaluator to\n                                        evaluate the input to the function, or return None to pass through the\n                                        mostly expanded #if expression.</p>\n                                    <p>The default returns a lambda which returns integer 0, as per the C standard.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_unknown_macro_function_in_expr(self,ident):\n    &#34;&#34;&#34;Called when an expression passed to an #if contained an unknown function.\n    \n    Return a callable which will be invoked by the expression evaluator to\n    evaluate the input to the function, or return None to pass through the\n    mostly expanded #if expression.\n    \n    The default returns a lambda which returns integer 0, as per the C standard.\n    &#34;&#34;&#34;\n    return lambda x : 0</code></pre>\n                                </details>\n                            </dd>\n                            <dt 
id=\"pcpp.parser.PreprocessorHooks.on_unknown_macro_in_defined_expr\"><code\n                                    class=\"name flex\">\n<span>def <span class=\"ident\">on_unknown_macro_in_defined_expr</span></span>(<span>self, tok)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called when an expression passed to an #if contained a defined operator\n                                        performed on something unknown.</p>\n                                    <p>Return True if to treat it as defined, False if to treat it as undefined,\n                                        raise OutputDirective to pass through without execution, or return None to\n                                        pass through the mostly expanded #if expression apart from the unknown defined.\n                                    </p>\n                                    <p>The default returns False, as per the C standard.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_unknown_macro_in_defined_expr(self,tok):\n    &#34;&#34;&#34;Called when an expression passed to an #if contained a defined operator\n    performed on something unknown.\n    \n    Return True if to treat it as defined, False if to treat it as undefined,\n    raise OutputDirective to pass through without execution, or return None to\n    pass through the mostly expanded #if expression apart from the unknown defined.\n    \n    The default returns False, as per the C standard.\n    &#34;&#34;&#34;\n    return False</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.parser.PreprocessorHooks.on_unknown_macro_in_expr\"><code class=\"name 
flex\">\n<span>def <span class=\"ident\">on_unknown_macro_in_expr</span></span>(<span>self, ident)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called when an expression passed to an #if contained an unknown identifier.</p>\n                                    <p>Return what value the expression evaluator ought to use, or return None to\n                                        pass through the mostly expanded #if expression.</p>\n                                    <p>The default returns an integer 0, as per the C standard.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_unknown_macro_in_expr(self,ident):\n    &#34;&#34;&#34;Called when an expression passed to an #if contained an unknown identifier.\n    \n    Return what value the expression evaluator ought to use, or return None to\n    pass through the mostly expanded #if expression.\n    \n    The default returns an integer 0, as per the C standard.\n    &#34;&#34;&#34;\n    return 0</code></pre>\n                                </details>\n                            </dd>\n                        </dl>\n                    </dd>\n                </dl>\n            </section>\n        </article>\n        <nav id=\"sidebar\">\n            <h1>Index</h1>\n            <div class=\"toc\">\n                <ul></ul>\n            </div>\n            <ul id=\"index\">\n                <li>\n                    <h3>Super-module</h3>\n                    <ul>\n                        <li><code><a title=\"pcpp\" href=\"index.html\">pcpp</a></code></li>\n                    </ul>\n                </li>\n                <li>\n                    <h3><a href=\"#header-functions\">Functions</a></h3>\n                    
<ul class=\"two-column\">\n                        <li><code><a title=\"pcpp.parser.CPP_INTEGER\" href=\"#pcpp.parser.CPP_INTEGER\">CPP_INTEGER</a></code>\n                        </li>\n                        <li><code><a title=\"pcpp.parser.default_lexer\" href=\"#pcpp.parser.default_lexer\">default_lexer</a></code>\n                        </li>\n                        <li><code><a title=\"pcpp.parser.t_CPP_CHAR\" href=\"#pcpp.parser.t_CPP_CHAR\">t_CPP_CHAR</a></code>\n                        </li>\n                        <li><code><a title=\"pcpp.parser.t_CPP_COMMENT1\" href=\"#pcpp.parser.t_CPP_COMMENT1\">t_CPP_COMMENT1</a></code>\n                        </li>\n                        <li><code><a title=\"pcpp.parser.t_CPP_COMMENT2\" href=\"#pcpp.parser.t_CPP_COMMENT2\">t_CPP_COMMENT2</a></code>\n                        </li>\n                        <li><code><a title=\"pcpp.parser.t_CPP_INTEGER\" href=\"#pcpp.parser.t_CPP_INTEGER\">t_CPP_INTEGER</a></code>\n                        </li>\n                        <li><code><a title=\"pcpp.parser.t_CPP_LINECONT\" href=\"#pcpp.parser.t_CPP_LINECONT\">t_CPP_LINECONT</a></code>\n                        </li>\n                        <li><code><a title=\"pcpp.parser.t_CPP_STRING\" href=\"#pcpp.parser.t_CPP_STRING\">t_CPP_STRING</a></code>\n                        </li>\n                        <li><code><a title=\"pcpp.parser.t_CPP_WS\" href=\"#pcpp.parser.t_CPP_WS\">t_CPP_WS</a></code></li>\n                        <li><code><a title=\"pcpp.parser.t_error\" href=\"#pcpp.parser.t_error\">t_error</a></code></li>\n                        <li><code><a title=\"pcpp.parser.trigraph\" href=\"#pcpp.parser.trigraph\">trigraph</a></code></li>\n                    </ul>\n                </li>\n                <li>\n                    <h3><a href=\"#header-classes\">Classes</a></h3>\n                    <ul>\n                        <li>\n                            <h4><code><a title=\"pcpp.parser.Action\" 
href=\"#pcpp.parser.Action\">Action</a></code></h4>\n                            <ul class=\"\">\n                                <li><code><a title=\"pcpp.parser.Action.IgnoreAndPassThrough\" href=\"#pcpp.parser.Action.IgnoreAndPassThrough\">IgnoreAndPassThrough</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.parser.Action.IgnoreAndRemove\" href=\"#pcpp.parser.Action.IgnoreAndRemove\">IgnoreAndRemove</a></code>\n                                </li>\n                            </ul>\n                        </li>\n                        <li>\n                            <h4><code><a title=\"pcpp.parser.Macro\" href=\"#pcpp.parser.Macro\">Macro</a></code></h4>\n                            <ul class=\"\">\n                                <li><code><a title=\"pcpp.parser.Macro.__init__\" href=\"#pcpp.parser.Macro.__init__\">__init__</a></code>\n                                </li>\n                            </ul>\n                        </li>\n                        <li>\n                            <h4><code><a title=\"pcpp.parser.OutputDirective\" href=\"#pcpp.parser.OutputDirective\">OutputDirective</a></code>\n                            </h4>\n                            <ul class=\"\">\n                                <li><code><a title=\"pcpp.parser.OutputDirective.__init__\" href=\"#pcpp.parser.OutputDirective.__init__\">__init__</a></code>\n                                </li>\n                            </ul>\n                        </li>\n                        <li>\n                            <h4><code><a title=\"pcpp.parser.PreprocessorHooks\" href=\"#pcpp.parser.PreprocessorHooks\">PreprocessorHooks</a></code>\n                            </h4>\n                            <ul class=\"\">\n                                <li><code><a title=\"pcpp.parser.PreprocessorHooks.__init__\" href=\"#pcpp.parser.PreprocessorHooks.__init__\">__init__</a></code>\n                                
</li>\n                                <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_comment\" href=\"#pcpp.parser.PreprocessorHooks.on_comment\">on_comment</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_directive_handle\" href=\"#pcpp.parser.PreprocessorHooks.on_directive_handle\">on_directive_handle</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_directive_unknown\" href=\"#pcpp.parser.PreprocessorHooks.on_directive_unknown\">on_directive_unknown</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_error\" href=\"#pcpp.parser.PreprocessorHooks.on_error\">on_error</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_file_open\" href=\"#pcpp.parser.PreprocessorHooks.on_file_open\">on_file_open</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_include_not_found\" href=\"#pcpp.parser.PreprocessorHooks.on_include_not_found\">on_include_not_found</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_potential_include_guard\" href=\"#pcpp.parser.PreprocessorHooks.on_potential_include_guard\">on_potential_include_guard</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_unknown_macro_function_in_expr\" href=\"#pcpp.parser.PreprocessorHooks.on_unknown_macro_function_in_expr\">on_unknown_macro_function_in_expr</a></code>\n                                </li>\n                                <li><code><a 
title=\"pcpp.parser.PreprocessorHooks.on_unknown_macro_in_defined_expr\" href=\"#pcpp.parser.PreprocessorHooks.on_unknown_macro_in_defined_expr\">on_unknown_macro_in_defined_expr</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_unknown_macro_in_expr\" href=\"#pcpp.parser.PreprocessorHooks.on_unknown_macro_in_expr\">on_unknown_macro_in_expr</a></code>\n                                </li>\n                            </ul>\n                        </li>\n                    </ul>\n                </li>\n            </ul>\n        </nav>\n    </main>\n    <footer id=\"footer\">\n        <p>Generated by <a href=\"https://pdoc3.github.io/pdoc\"><cite>pdoc</cite> 0.5.3</a>.</p>\n    </footer>\n    <script src=\"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js\"></script>\n    <script>hljs.initHighlightingOnLoad()</script>\n</body>\n\n</html>"
  },
  {
    "path": "doc/parsetab.html",
    "content": "<!doctype html>\n<html lang=\"en\">\n<head>\n<meta charset=\"utf-8\">\n<meta name=\"viewport\" content=\"width=device-width, initial-scale=1, minimum-scale=1\" />\n<meta name=\"generator\" content=\"pdoc 0.5.3\" />\n<title>pcpp.parsetab API documentation</title>\n<meta name=\"description\" content=\"\" />\n<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>\n<link href='https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/8.0.0/sanitize.min.css' rel='stylesheet'>\n<link href=\"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/github.min.css\" rel=\"stylesheet\">\n<style>.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^=\"header-\"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{font-weight:bold}#index h4 + 
ul{margin-bottom:.6em}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}.name > span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.name small{font-weight:normal}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase;cursor:pointer}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}.admonition{padding:.1em .5em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>\n<style media=\"screen and (min-width: 700px)\">@media screen and (min-width:700px){#sidebar{width:30%}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>\n<style media=\"print\">@media 
print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:\" (\" attr(href) \")\";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:\" (\" attr(title) \")\"}.ir a:after,a[href^=\"javascript:\"]:after,a[href^=\"#\"]:after{content:\"\"}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>\n</head>\n<body>\n<main>\n<article id=\"content\">\n<header>\n<h1 class=\"title\"><code>pcpp.parsetab</code> module</h1>\n</header>\n<section id=\"section-intro\">\n<details class=\"source\">\n<summary>Source code</summary>\n<pre><code class=\"python\"># parsetab.py\n# This file is automatically generated. Do not edit.\n# pylint: disable=W,C,R\n_tabversion = &#39;3.10&#39;\n\n_lr_method = &#39;LALR&#39;\n\n_lr_signature = &#39;leftCPP_COMMAleftCPP_QUESTIONCPP_COLONleftCPP_LOGICALORleftCPP_LOGICALANDleftCPP_BARleftCPP_HATleftCPP_AMPERSANDleftCPP_EQUALITYCPP_INEQUALITYleftCPP_LESSCPP_LESSEQUALCPP_GREATERCPP_GREATEREQUALleftCPP_LSHIFTCPP_RSHIFTleftCPP_PLUSCPP_MINUSleftCPP_STARCPP_FSLASHCPP_PERCENTrightUPLUSUMINUSCPP_EXCLAMATIONCPP_TILDECPP_AMPERSAND CPP_BAR CPP_CHAR CPP_COLON CPP_COMMA CPP_EQUALITY CPP_EXCLAMATION CPP_FSLASH CPP_GREATER CPP_GREATEREQUAL CPP_HAT CPP_ID CPP_INEQUALITY CPP_INTEGER CPP_LESS CPP_LESSEQUAL CPP_LOGICALAND CPP_LOGICALOR CPP_LPAREN CPP_LSHIFT CPP_MINUS CPP_PERCENT CPP_PLUS CPP_QUESTION CPP_RPAREN CPP_RSHIFT CPP_STAR CPP_STRING CPP_TILDEexpression : CPP_INTEGERexpression : CPP_CHAR\\n    expression : CPP_STRING\\n              | CPP_LESS expression CPP_GREATER\\n    expression : CPP_LPAREN expression CPP_RPARENexpression : CPP_PLUS expression %prec UPLUSexpression : CPP_MINUS expression %prec 
UMINUS\\n    expression : CPP_EXCLAMATION expression\\n              | CPP_TILDE expression\\n    \\n    expression : expression CPP_STAR expression\\n              | expression CPP_FSLASH expression\\n              | expression CPP_PERCENT expression\\n              | expression CPP_PLUS expression\\n              | expression CPP_MINUS expression\\n              | expression CPP_LSHIFT expression\\n              | expression CPP_RSHIFT expression\\n              | expression CPP_LESS expression\\n              | expression CPP_LESSEQUAL expression\\n              | expression CPP_GREATER expression\\n              | expression CPP_GREATEREQUAL expression\\n              | expression CPP_EQUALITY expression\\n              | expression CPP_INEQUALITY expression\\n              | expression CPP_AMPERSAND expression\\n              | expression CPP_HAT expression\\n              | expression CPP_BAR expression\\n              | expression CPP_LOGICALAND expression\\n              | expression CPP_LOGICALOR expression\\n              | expression CPP_COMMA expression\\n    expression : expression CPP_QUESTION expression CPP_COLON expressionexpression : CPP_ID CPP_LPAREN expression CPP_RPARENexpression : CPP_ID&#39;\n    \n_lr_action_items = 
{&#39;CPP_ID&#39;:([0,3,4,6,8,9,11,12,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,41,63,],[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,]),&#39;CPP_LSHIFT&#39;:([1,2,5,7,10,13,14,15,16,17,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,18,-8,-6,18,-7,18,-9,18,-5,-4,-15,18,18,18,18,-12,18,18,18,-13,18,-11,18,18,-14,-10,18,18,18,-16,-30,18,]),&#39;CPP_STRING&#39;:([0,3,4,6,8,9,11,12,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,41,63,],[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,]),&#39;CPP_LESSEQUAL&#39;:([1,2,5,7,10,13,14,15,16,17,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,19,-8,-6,19,-7,19,-9,19,-5,-4,-15,-18,-20,19,19,-12,19,19,19,-13,19,-11,19,19,-14,-10,19,-19,-17,-16,-30,19,]),&#39;CPP_GREATEREQUAL&#39;:([1,2,5,7,10,13,14,15,16,17,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,20,-8,-6,20,-7,20,-9,20,-5,-4,-15,-18,-20,20,20,-12,20,20,20,-13,20,-11,20,20,-14,-10,20,-19,-17,-16,-30,20,]),&#39;CPP_LOGICALOR&#39;:([1,2,5,7,10,13,14,15,16,17,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,21,-8,-6,21,-7,21,-9,21,-5,-4,-15,-18,-20,-27,-26,-12,-23,21,-25,-13,-22,-11,-21,-24,-14,-10,21,-19,-17,-16,-30,21,]),&#39;CPP_LOGICALAND&#39;:([1,2,5,7,10,13,14,15,16,17,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,22,-8,-6,22,-7,22,-9,22,-5,-4,-15,-18,-20,22,-26,-12,-23,22,-25,-13,-22,-11,-21,-24,-14,-10,22,-19,-17,-16,-30,22,]),&#39;CPP_EXCLAMATION&#39;:([0,3,4,6,8,9,11,12,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,41,63,],[3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,]),&#39;CPP_PERCENT&#39;:([1,2,5,7,10,13,14,15,16,17,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,23,-8,-6,23,-7,23,-9,23,-5,-4,23,23,23,2
3,23,-12,23,23,23,23,23,-11,23,23,23,-10,23,23,23,23,-30,23,]),&#39;CPP_AMPERSAND&#39;:([1,2,5,7,10,13,14,15,16,17,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,24,-8,-6,24,-7,24,-9,24,-5,-4,-15,-18,-20,24,24,-12,-23,24,24,-13,-22,-11,-21,24,-14,-10,24,-19,-17,-16,-30,24,]),&#39;CPP_QUESTION&#39;:([1,2,5,7,10,13,14,15,16,17,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,25,-8,-6,25,-7,25,-9,25,-5,-4,-15,-18,-20,-27,-26,-12,-23,25,-25,-13,-22,-11,-21,-24,-14,-10,25,-19,-17,-16,-30,-29,]),&#39;CPP_BAR&#39;:([1,2,5,7,10,13,14,15,16,17,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,26,-8,-6,26,-7,26,-9,26,-5,-4,-15,-18,-20,26,26,-12,-23,26,-25,-13,-22,-11,-21,-24,-14,-10,26,-19,-17,-16,-30,26,]),&#39;$end&#39;:([1,2,5,7,10,13,14,16,38,40,41,42,43,44,45,46,47,48,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,0,-8,-6,-7,-9,-5,-4,-15,-18,-20,-27,-26,-12,-23,-25,-13,-22,-11,-21,-24,-14,-10,-28,-19,-17,-16,-30,-29,]),&#39;CPP_PLUS&#39;:([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,],[4,-31,-3,4,4,-2,4,-1,4,4,27,4,4,-8,-6,27,-7,27,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,-9,27,-5,4,27,27,27,27,27,-12,27,27,27,-13,27,-11,27,27,-14,-10,27,27,27,27,-30,4,27,]),&#39;CPP_INEQUALITY&#39;:([1,2,5,7,10,13,14,15,16,17,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,28,-8,-6,28,-7,28,-9,28,-5,-4,-15,-18,-20,28,28,-12,28,28,28,-13,-22,-11,-21,28,-14,-10,28,-19,-17,-16,-30,28,]),&#39;CPP_CHAR&#39;:([0,3,4,6,8,9,11,12,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,41,63,],[5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,]),&#39;CPP_LPAREN&#39;:([0,1,3,4,6,8,9,11,12,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,41,63,]
,[6,12,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,]),&#39;CPP_FSLASH&#39;:([1,2,5,7,10,13,14,15,16,17,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,29,-8,-6,29,-7,29,-9,29,-5,-4,29,29,29,29,29,-12,29,29,29,29,29,-11,29,29,29,-10,29,29,29,29,-30,29,]),&#39;CPP_INTEGER&#39;:([0,3,4,6,8,9,11,12,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,41,63,],[7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,]),&#39;CPP_HAT&#39;:([1,2,5,7,10,13,14,15,16,17,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,31,-8,-6,31,-7,31,-9,31,-5,-4,-15,-18,-20,31,31,-12,-23,31,31,-13,-22,-11,-21,-24,-14,-10,31,-19,-17,-16,-30,31,]),&#39;CPP_EQUALITY&#39;:([1,2,5,7,10,13,14,15,16,17,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,30,-8,-6,30,-7,30,-9,30,-5,-4,-15,-18,-20,30,30,-12,30,30,30,-13,-22,-11,-21,30,-14,-10,30,-19,-17,-16,-30,30,]),&#39;CPP_MINUS&#39;:([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,],[8,-31,-3,8,8,-2,8,-1,8,8,32,8,8,-8,-6,32,-7,32,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,-9,32,-5,8,32,32,32,32,32,-12,32,32,32,-13,32,-11,32,32,-14,-10,32,32,32,32,-30,8,32,]),&#39;CPP_STAR&#39;:([1,2,5,7,10,13,14,15,16,17,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,33,-8,-6,33,-7,33,-9,33,-5,-4,33,33,33,33,33,-12,33,33,33,33,33,-11,33,33,33,-10,33,33,33,33,-30,33,]),&#39;CPP_COMMA&#39;:([1,2,5,7,10,13,14,15,16,17,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,34,-8,-6,34,-7,34,-9,34,-5,-4,-15,-18,-20,-27,-26,-12,-23,34,-25,-13,-22,-11,-21,-24,-14,-10,-28,-19,-17,-16,-30,-29,]),&#39;CPP_GREATER&#39;:([1,2,5,7,10,13,14,15,16,17,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,5
7,58,59,60,61,62,64,],[-31,-3,-2,-1,35,-8,-6,35,-7,41,-9,35,-5,-4,-15,-18,-20,35,35,-12,35,35,35,-13,35,-11,35,35,-14,-10,35,-19,-17,-16,-30,35,]),&#39;CPP_RSHIFT&#39;:([1,2,5,7,10,13,14,15,16,17,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,37,-8,-6,37,-7,37,-9,37,-5,-4,-15,37,37,37,37,-12,37,37,37,-13,37,-11,37,37,-14,-10,37,37,37,-16,-30,37,]),&#39;CPP_LESS&#39;:([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,],[9,-31,-3,9,9,-2,9,-1,9,9,36,9,9,-8,-6,36,-7,36,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,-9,36,-5,-4,-15,-18,-20,36,36,-12,36,36,36,-13,36,-11,36,36,-14,-10,36,-19,-17,-16,-30,9,36,]),&#39;CPP_COLON&#39;:([1,2,5,7,13,14,16,38,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,-8,-6,-7,-9,-5,-4,-15,-18,-20,-27,-26,-12,-23,63,-25,-13,-22,-11,-21,-24,-14,-10,-28,-19,-17,-16,-30,-29,]),&#39;CPP_TILDE&#39;:([0,3,4,6,8,9,11,12,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,41,63,],[11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,]),&#39;CPP_RPAREN&#39;:([1,2,5,7,13,14,15,16,38,39,40,41,42,43,44,45,46,47,48,50,51,52,53,54,55,56,57,58,59,60,61,62,64,],[-31,-3,-2,-1,-8,-6,40,-7,-9,62,-5,-4,-15,-18,-20,-27,-26,-12,-23,-25,-13,-22,-11,-21,-24,-14,-10,-28,-19,-17,-16,-30,-29,]),}\n\n_lr_action = {}\nfor _k, _v in _lr_action_items.items():\n   for _x,_y in zip(_v[0],_v[1]):\n      if not _x in _lr_action:  _lr_action[_x] = {}\n      _lr_action[_x][_k] = _y\ndel _lr_action_items\n\n_lr_goto_items = {&#39;expression&#39;:([0,3,4,6,8,9,11,12,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,41,63,],[10,13,14,15,16,17,38,39,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,59,64,]),}\n\n_lr_goto = {}\nfor _k, _v in _lr_goto_items.items():\n   for _x, _y in zip(_v[0], _v[1]):\n    
   if not _x in _lr_goto: _lr_goto[_x] = {}\n       _lr_goto[_x][_k] = _y\ndel _lr_goto_items\n_lr_productions = [\n  (&#34;S&#39; -&gt; expression&#34;,&#34;S&#39;&#34;,1,None,None,None),\n  (&#39;expression -&gt; CPP_INTEGER&#39;,&#39;expression&#39;,1,&#39;p_expression_number&#39;,&#39;evaluator.py&#39;,396),\n  (&#39;expression -&gt; CPP_CHAR&#39;,&#39;expression&#39;,1,&#39;p_expression_character&#39;,&#39;evaluator.py&#39;,400),\n  (&#39;expression -&gt; CPP_STRING&#39;,&#39;expression&#39;,1,&#39;p_expression_string&#39;,&#39;evaluator.py&#39;,405),\n  (&#39;expression -&gt; CPP_LESS expression CPP_GREATER&#39;,&#39;expression&#39;,3,&#39;p_expression_string&#39;,&#39;evaluator.py&#39;,406),\n  (&#39;expression -&gt; CPP_LPAREN expression CPP_RPAREN&#39;,&#39;expression&#39;,3,&#39;p_expression_group&#39;,&#39;evaluator.py&#39;,411),\n  (&#39;expression -&gt; CPP_PLUS expression&#39;,&#39;expression&#39;,2,&#39;p_expression_uplus&#39;,&#39;evaluator.py&#39;,415),\n  (&#39;expression -&gt; CPP_MINUS expression&#39;,&#39;expression&#39;,2,&#39;p_expression_uminus&#39;,&#39;evaluator.py&#39;,419),\n  (&#39;expression -&gt; CPP_EXCLAMATION expression&#39;,&#39;expression&#39;,2,&#39;p_expression_unop&#39;,&#39;evaluator.py&#39;,424),\n  (&#39;expression -&gt; CPP_TILDE expression&#39;,&#39;expression&#39;,2,&#39;p_expression_unop&#39;,&#39;evaluator.py&#39;,425),\n  (&#39;expression -&gt; expression CPP_STAR expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,437),\n  (&#39;expression -&gt; expression CPP_FSLASH expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,438),\n  (&#39;expression -&gt; expression CPP_PERCENT expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,439),\n  (&#39;expression -&gt; expression CPP_PLUS expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,440),\n  (&#39;expression -&gt; 
expression CPP_MINUS expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,441),\n  (&#39;expression -&gt; expression CPP_LSHIFT expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,442),\n  (&#39;expression -&gt; expression CPP_RSHIFT expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,443),\n  (&#39;expression -&gt; expression CPP_LESS expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,444),\n  (&#39;expression -&gt; expression CPP_LESSEQUAL expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,445),\n  (&#39;expression -&gt; expression CPP_GREATER expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,446),\n  (&#39;expression -&gt; expression CPP_GREATEREQUAL expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,447),\n  (&#39;expression -&gt; expression CPP_EQUALITY expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,448),\n  (&#39;expression -&gt; expression CPP_INEQUALITY expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,449),\n  (&#39;expression -&gt; expression CPP_AMPERSAND expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,450),\n  (&#39;expression -&gt; expression CPP_HAT expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,451),\n  (&#39;expression -&gt; expression CPP_BAR expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,452),\n  (&#39;expression -&gt; expression CPP_LOGICALAND expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,453),\n  (&#39;expression -&gt; expression CPP_LOGICALOR 
expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,454),\n  (&#39;expression -&gt; expression CPP_COMMA expression&#39;,&#39;expression&#39;,3,&#39;p_expression_binop&#39;,&#39;evaluator.py&#39;,455),\n  (&#39;expression -&gt; expression CPP_QUESTION expression CPP_COLON expression&#39;,&#39;expression&#39;,5,&#39;p_expression_conditional&#39;,&#39;evaluator.py&#39;,501),\n  (&#39;expression -&gt; CPP_ID CPP_LPAREN expression CPP_RPAREN&#39;,&#39;expression&#39;,4,&#39;p_expression_function_call&#39;,&#39;evaluator.py&#39;,513),\n  (&#39;expression -&gt; CPP_ID&#39;,&#39;expression&#39;,1,&#39;p_expression_identifier&#39;,&#39;evaluator.py&#39;,520),\n]</code></pre>\n</details>\n</section>\n<section>\n</section>\n<section>\n</section>\n<section>\n</section>\n<section>\n</section>\n</article>\n<nav id=\"sidebar\">\n<h1>Index</h1>\n<div class=\"toc\">\n<ul></ul>\n</div>\n<ul id=\"index\">\n<li><h3>Super-module</h3>\n<ul>\n<li><code><a title=\"pcpp\" href=\"index.html\">pcpp</a></code></li>\n</ul>\n</li>\n</ul>\n</nav>\n</main>\n<footer id=\"footer\">\n<p>Generated by <a href=\"https://pdoc3.github.io/pdoc\"><cite>pdoc</cite> 0.5.3</a>.</p>\n</footer>\n<script src=\"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js\"></script>\n<script>hljs.initHighlightingOnLoad()</script>\n</body>\n</html>"
  },
  {
    "path": "doc/pcmd.html",
    "content": "<!doctype html>\n<html lang=\"en\">\n\n<head>\n    <meta charset=\"utf-8\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, minimum-scale=1\" />\n    <meta name=\"generator\" content=\"pdoc 0.5.3\" />\n    <title>pcpp.pcmd API documentation</title>\n    <meta name=\"description\" content=\"\" />\n    <link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>\n    <link href='https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/8.0.0/sanitize.min.css' rel='stylesheet'>\n    <link href=\"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/github.min.css\" rel=\"stylesheet\">\n    <style>\n        .flex {\n            display: flex !important\n        }\n\n        body {\n            line-height: 1.5em\n        }\n\n        #content {\n            padding: 20px\n        }\n\n        #sidebar {\n            padding: 30px;\n            overflow: hidden\n        }\n\n        .http-server-breadcrumbs {\n            font-size: 130%;\n            margin: 0 0 15px 0\n        }\n\n        #footer {\n            font-size: .75em;\n            padding: 5px 30px;\n            border-top: 1px solid #ddd;\n            text-align: right\n        }\n\n        #footer p {\n            margin: 0 0 0 1em;\n            display: inline-block\n        }\n\n        #footer p:last-child {\n            margin-right: 30px\n        }\n\n        h1,\n        h2,\n        h3,\n        h4,\n        h5 {\n            font-weight: 300\n        }\n\n        h1 {\n            font-size: 2.5em;\n            line-height: 1.1em\n        }\n\n        h2 {\n            font-size: 1.75em;\n            margin: 1em 0 .50em 0\n        }\n\n        h3 {\n            font-size: 1.4em;\n            margin: 25px 0 10px 0\n        }\n\n        h4 {\n            margin: 0;\n            font-size: 105%\n        }\n\n        a {\n            color: #058;\n            text-decoration: none;\n            
transition: color .3s ease-in-out\n        }\n\n        a:hover {\n            color: #e82\n        }\n\n        .title code {\n            font-weight: bold\n        }\n\n        h2[id^=\"header-\"] {\n            margin-top: 2em\n        }\n\n        .ident {\n            color: #900\n        }\n\n        pre code {\n            background: #f8f8f8;\n            font-size: .8em;\n            line-height: 1.4em\n        }\n\n        code {\n            background: #f2f2f1;\n            padding: 1px 4px;\n            overflow-wrap: break-word\n        }\n\n        h1 code {\n            background: transparent\n        }\n\n        pre {\n            background: #f8f8f8;\n            border: 0;\n            border-top: 1px solid #ccc;\n            border-bottom: 1px solid #ccc;\n            margin: 1em 0;\n            padding: 1ex\n        }\n\n        #http-server-module-list {\n            display: flex;\n            flex-flow: column\n        }\n\n        #http-server-module-list div {\n            display: flex\n        }\n\n        #http-server-module-list dt {\n            min-width: 10%\n        }\n\n        #http-server-module-list p {\n            margin-top: 0\n        }\n\n        .toc ul,\n        #index {\n            list-style-type: none;\n            margin: 0;\n            padding: 0\n        }\n\n        #index code {\n            background: transparent\n        }\n\n        #index h3 {\n            border-bottom: 1px solid #ddd\n        }\n\n        #index ul {\n            padding: 0\n        }\n\n        #index h4 {\n            font-weight: bold\n        }\n\n        #index h4+ul {\n            margin-bottom: .6em\n        }\n\n        @media (min-width:200ex) {\n            #index .two-column {\n                column-count: 2\n            }\n        }\n\n        @media (min-width:300ex) {\n            #index .two-column {\n                column-count: 3\n            }\n        }\n\n        dl {\n            margin-bottom: 2em\n        
}\n\n        dl dl:last-child {\n            margin-bottom: 4em\n        }\n\n        dd {\n            margin: 0 0 1em 3em\n        }\n\n        #header-classes+dl>dd {\n            margin-bottom: 3em\n        }\n\n        dd dd {\n            margin-left: 2em\n        }\n\n        dd p {\n            margin: 10px 0\n        }\n\n        .name {\n            background: #eee;\n            font-weight: bold;\n            font-size: .85em;\n            padding: 5px 10px;\n            display: inline-block;\n            min-width: 40%\n        }\n\n        .name:hover {\n            background: #e0e0e0\n        }\n\n        .name>span:first-child {\n            white-space: nowrap\n        }\n\n        .name.class>span:nth-child(2) {\n            margin-left: .4em\n        }\n\n        .name small {\n            font-weight: normal\n        }\n\n        .inherited {\n            color: #999;\n            border-left: 5px solid #eee;\n            padding-left: 1em\n        }\n\n        .inheritance em {\n            font-style: normal;\n            font-weight: bold\n        }\n\n        .desc h2 {\n            font-weight: 400;\n            font-size: 1.25em\n        }\n\n        .desc h3 {\n            font-size: 1em\n        }\n\n        .desc dt code {\n            background: inherit\n        }\n\n        .source summary {\n            color: #666;\n            text-align: right;\n            font-weight: 400;\n            font-size: .8em;\n            text-transform: uppercase;\n            cursor: pointer\n        }\n\n        .source pre {\n            max-height: 500px;\n            overflow: auto;\n            margin: 0\n        }\n\n        .source pre code {\n            font-size: 12px;\n            overflow: visible\n        }\n\n        .hlist {\n            list-style: none\n        }\n\n        .hlist li {\n            display: inline\n        }\n\n        .hlist li:after {\n            content: ',\\2002'\n        }\n\n        .hlist 
li:last-child:after {\n            content: none\n        }\n\n        .hlist .hlist {\n            display: inline;\n            padding-left: 1em\n        }\n\n        img {\n            max-width: 100%\n        }\n\n        .admonition {\n            padding: .1em .5em\n        }\n\n        .admonition-title {\n            font-weight: bold\n        }\n\n        .admonition.note,\n        .admonition.info,\n        .admonition.important {\n            background: #aef\n        }\n\n        .admonition.todo,\n        .admonition.versionadded,\n        .admonition.tip,\n        .admonition.hint {\n            background: #dfd\n        }\n\n        .admonition.warning,\n        .admonition.versionchanged,\n        .admonition.deprecated {\n            background: #fd4\n        }\n\n        .admonition.error,\n        .admonition.danger,\n        .admonition.caution {\n            background: lightpink\n        }\n    </style>\n    <style media=\"screen and (min-width: 700px)\">\n        @media screen and (min-width:700px) {\n            #sidebar {\n                width: 30%\n            }\n\n            #content {\n                width: 70%;\n                max-width: 100ch;\n                padding: 3em 4em;\n                border-left: 1px solid #ddd\n            }\n\n            pre code {\n                font-size: 1em\n            }\n\n            .item .name {\n                font-size: 1em\n            }\n\n            main {\n                display: flex;\n                flex-direction: row-reverse;\n                justify-content: flex-end\n            }\n\n            .toc ul ul,\n            #index ul {\n                padding-left: 1.5em\n            }\n\n            .toc>ul>li {\n                margin-top: .5em\n            }\n        }\n    </style>\n    <style media=\"print\">\n        @media print {\n            #sidebar h1 {\n                page-break-before: always\n            }\n\n            .source {\n                display: 
none\n            }\n        }\n\n        @media print {\n            * {\n                background: transparent !important;\n                color: #000 !important;\n                box-shadow: none !important;\n                text-shadow: none !important\n            }\n\n            a[href]:after {\n                content: \" (\" attr(href) \")\";\n                font-size: 90%\n            }\n\n            a[href][title]:after {\n                content: none\n            }\n\n            abbr[title]:after {\n                content: \" (\" attr(title) \")\"\n            }\n\n            .ir a:after,\n            a[href^=\"javascript:\"]:after,\n            a[href^=\"#\"]:after {\n                content: \"\"\n            }\n\n            pre,\n            blockquote {\n                border: 1px solid #999;\n                page-break-inside: avoid\n            }\n\n            thead {\n                display: table-header-group\n            }\n\n            tr,\n            img {\n                page-break-inside: avoid\n            }\n\n            img {\n                max-width: 100% !important\n            }\n\n            @page {\n                margin: 0.5cm\n            }\n\n            p,\n            h2,\n            h3 {\n                orphans: 3;\n                widows: 3\n            }\n\n            h1,\n            h2,\n            h3,\n            h4,\n            h5,\n            h6 {\n                page-break-after: avoid\n            }\n        }\n    </style>\n</head>\n\n<body>\n    <main>\n        <article id=\"content\">\n            <header>\n                <h1 class=\"title\"><code>pcpp.pcmd</code> module</h1>\n            </header>\n            <section id=\"section-intro\">\n                <details class=\"source\">\n                    <summary>Source code</summary>\n                    <pre><code class=\"python\">#!/usr/bin/python\n# Python C99 conforming preprocessor command line\n# (C) 2017-2026 Niall Douglas 
http://www.nedproductions.biz/\n# Started: March 2017\n\nfrom __future__ import generators, print_function, absolute_import, division\n\nimport sys, argparse, traceback, os, copy, io, re\nif __name__ == &#39;__main__&#39; and __package__ is None:\n    sys.path.append( os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) ) )\nfrom pcpp.preprocessor import Preprocessor, OutputDirective, Action\n\nversion=&#39;1.30&#39;\n\n__all__ = []\n\nclass FileAction(argparse.Action):\n    def __init__(self, option_strings, dest, **kwargs):\n        super(FileAction, self).__init__(option_strings, dest, **kwargs)\n        \n    def __call__(self, parser, namespace, values, option_string=None):\n        if getattr(namespace, self.dest)[0] == sys.stdin:\n            items = []\n        else:\n            items = copy.copy(getattr(namespace, self.dest))\n        items += [argparse.FileType(&#39;rt&#39;)(value) for value in values]\n        setattr(namespace, self.dest, items)\n\nclass CmdPreprocessor(Preprocessor):\n    def __init__(self, argv):\n        if len(argv) &lt; 2:\n            argv = [argv[0], &#39;--help&#39;]\n        argp = argparse.ArgumentParser(prog=&#39;pcpp&#39;,\n            description=\n    &#39;&#39;&#39;A pure universal Python C (pre-)preprocessor implementation very useful for\n    pre-preprocessing header only C++ libraries into single file includes and\n    other such build or packaging stage malarky.&#39;&#39;&#39;,\n            epilog=\n    &#39;&#39;&#39;Note that so pcpp can stand in for other preprocessor tooling, it\n    ignores any arguments it does not understand.&#39;&#39;&#39;)\n        argp.add_argument(&#39;inputs&#39;, metavar = &#39;input&#39;, default = [sys.stdin], nargs = &#39;*&#39;, action = FileAction, help = &#39;Files to preprocess (use \\&#39;-\\&#39; for stdin)&#39;)\n        argp.add_argument(&#39;-o&#39;, dest = &#39;output&#39;, metavar = &#39;path&#39;, type = argparse.FileType(&#39;wt&#39;), default=sys.stdout, nargs = 
&#39;?&#39;, help = &#39;Output to a file instead of stdout&#39;)\n        argp.add_argument(&#39;-D&#39;, dest = &#39;defines&#39;, metavar = &#39;macro[=val]&#39;, nargs = 1, action = &#39;append&#39;, help = &#39;Predefine name as a macro [with value]&#39;)\n        argp.add_argument(&#39;-U&#39;, dest = &#39;undefines&#39;, metavar = &#39;macro&#39;, nargs = 1, action = &#39;append&#39;, help = &#39;Pre-undefine name as a macro&#39;)\n        argp.add_argument(&#39;-N&#39;, dest = &#39;nevers&#39;, metavar = &#39;macro&#39;, nargs = 1, action = &#39;append&#39;, help = &#39;Never define name as a macro, even if defined during the preprocessing.&#39;)\n        argp.add_argument(&#39;-I&#39;, dest = &#39;includes&#39;, metavar = &#39;path&#39;, nargs = 1, action = &#39;append&#39;, help = &#34;Path to search for unfound #include&#39;s&#34;)\n        #argp.add_argument(&#39;--passthru&#39;, dest = &#39;passthru&#39;, action = &#39;store_true&#39;, help = &#39;Pass through everything unexecuted except for #include and include guards (which need to be the first thing in an include file&#39;)\n        argp.add_argument(&#39;--passthru-defines&#39;, dest = &#39;passthru_defines&#39;, action = &#39;store_true&#39;, help = &#39;Pass through but still execute #defines and #undefs if not always removed by preprocessor logic&#39;)\n        argp.add_argument(&#39;--passthru-unfound-includes&#39;, dest = &#39;passthru_unfound_includes&#39;, action = &#39;store_true&#39;, help = &#39;Pass through #includes not found without execution&#39;)\n        argp.add_argument(&#39;--passthru-unknown-exprs&#39;, dest = &#39;passthru_undefined_exprs&#39;, action = &#39;store_true&#39;, help = &#39;Unknown macros in expressions cause preprocessor logic to be passed through instead of executed by treating unknown macros as 0L&#39;)\n        argp.add_argument(&#39;--passthru-comments&#39;, dest = &#39;passthru_comments&#39;, action = &#39;store_true&#39;, help = &#39;Pass through comments 
unmodified&#39;)\n        argp.add_argument(&#39;--passthru-magic-macros&#39;, dest = &#39;passthru_magic_macros&#39;, action = &#39;store_true&#39;, help = &#39;Pass through double underscore magic macros unmodified&#39;)\n        argp.add_argument(&#39;--passthru-includes&#39;, dest = &#39;passthru_includes&#39;, metavar = &#39;&lt;regex&gt;&#39;, default = None, nargs = 1, help = &#34;Regular expression for which #includes to not expand. #includes, if found, are always executed&#34;)\n        argp.add_argument(&#39;--disable-auto-pragma-once&#39;, dest = &#39;auto_pragma_once_disabled&#39;, action = &#39;store_true&#39;, default = False, help = &#39;Disable the heuristics which auto apply #pragma once to #include files wholly wrapped in an obvious include guard macro&#39;)\n        argp.add_argument(&#39;--line-directive&#39;, dest = &#39;line_directive&#39;, metavar = &#39;form&#39;, default = &#39;#line&#39;, nargs = &#39;?&#39;, help = &#34;Form of line directive to use, defaults to #line, specify nothing to disable output of line directives&#34;)\n        argp.add_argument(&#39;--debug&#39;, dest = &#39;debug&#39;, action = &#39;store_true&#39;, help = &#39;Generate a pcpp_debug.log file logging execution&#39;)\n        argp.add_argument(&#39;--time&#39;, dest = &#39;time&#39;, action = &#39;store_true&#39;, help = &#39;Print the time it took to #include each file&#39;)\n        argp.add_argument(&#39;--filetimes&#39;, dest = &#39;filetimes&#39;, metavar = &#39;path&#39;, type = argparse.FileType(&#39;wt&#39;), default=None, nargs = &#39;?&#39;, help = &#39;Write CSV file with time spent inside each included file, inclusive and exclusive&#39;)\n        argp.add_argument(&#39;--compress&#39;, dest = &#39;compress&#39;, action = &#39;store_true&#39;, help = &#39;Make output as small as possible&#39;)\n        argp.add_argument(&#39;--assume-input-encoding&#39;, dest = &#39;assume_input_encoding&#39;, metavar = &#39;&lt;encoding&gt;&#39;, default = None, nargs 
= 1, help = &#39;The text encoding to assume inputs are in&#39;)\n        argp.add_argument(&#39;--output-encoding&#39;, dest = &#39;output_encoding&#39;, metavar = &#39;&lt;encoding&gt;&#39;, default = None, nargs = 1, help = &#39;The text encoding to use when writing files&#39;)\n        argp.add_argument(&#39;--write-bom&#39;, dest = &#39;write_bom&#39;, action = &#39;store_true&#39;, help = &#39;Prefix any output with a Unicode BOM&#39;)\n        argp.add_argument(&#39;--version&#39;, action=&#39;version&#39;, version=&#39;pcpp &#39; + version)\n        args = argp.parse_known_args(argv[1:])\n        #print(args)\n        for arg in args[1]:\n            print(&#34;NOTE: Argument %s not known, ignoring!&#34; % arg, file = sys.stderr)\n\n        self.args = args[0]\n        super(CmdPreprocessor, self).__init__()\n        \n        # Override Preprocessor instance variables\n        self.define(&#34;__PCPP_VERSION__ &#34; + version)\n        self.define(&#34;__PCPP_ALWAYS_FALSE__ 0&#34;)\n        self.define(&#34;__PCPP_ALWAYS_TRUE__ 1&#34;)\n        if self.args.debug:\n            self.debugout = open(&#34;pcpp_debug.log&#34;, &#34;wt&#34;)\n        self.auto_pragma_once_enabled = not self.args.auto_pragma_once_disabled\n        self.line_directive = self.args.line_directive\n        if self.line_directive is not None and self.line_directive.lower() in (&#39;nothing&#39;, &#39;none&#39;, &#39;&#39;):\n            self.line_directive = None\n        if self.args.passthru_includes is not None:\n            self.passthru_includes = re.compile(self.args.passthru_includes[0])\n        self.compress = 2 if self.args.compress else 0\n        if self.args.passthru_magic_macros:\n            self.undef(&#39;__DATE__&#39;)\n            self.undef(&#39;__TIME__&#39;)\n            self.expand_linemacro = False\n            self.expand_filemacro = False\n            self.expand_countermacro = False\n        if self.args.assume_input_encoding is not None:\n            
self.args.assume_input_encoding = self.args.assume_input_encoding[0]\n            self.assume_encoding = self.args.assume_input_encoding\n            if len(self.args.inputs) == 1:\n                # Reopen our input files with the appropriate encoding\n                _ = self.on_file_open(False, self.args.inputs[0].name)\n                self.args.inputs[0].close()\n                self.args.inputs[0] = _\n            if self.args.output_encoding is None:\n                self.args.output_encoding = self.args.assume_input_encoding\n        if self.args.output_encoding is not None:\n            self.args.output_encoding = self.args.output_encoding[0]\n            # Reopen our output file with the appropriate encoding\n            _ = io.open(self.args.output.name, &#39;w&#39;, encoding = self.args.output_encoding)\n            self.args.output.close()\n            self.args.output = _\n            if self.args.write_bom:\n                self.args.output.write(&#39;\\ufeff&#39;)\n        \n        # My own instance variables\n        self.bypass_ifpassthru = False\n        self.potential_include_guard = None\n\n        if self.args.defines:\n            self.args.defines = [x[0] for x in self.args.defines]\n            for d in self.args.defines:\n                if &#39;=&#39; not in d:\n                    d += &#39;=1&#39;\n                d = d.replace(&#39;=&#39;, &#39; &#39;, 1)\n                self.define(d)\n        if self.args.undefines:\n            self.args.undefines = [x[0] for x in self.args.undefines]\n            for d in self.args.undefines:\n                self.undef(d)\n        if self.args.nevers:\n            self.args.nevers = [x[0] for x in self.args.nevers]\n        if self.args.includes:\n            self.args.includes = [x[0] for x in self.args.includes]\n            for d in self.args.includes:\n                self.add_path(d)\n\n        try:\n            if len(self.args.inputs) == 1:\n                
self.parse(self.args.inputs[0])\n            else:\n                input = &#39;&#39;\n                for i in self.args.inputs:\n                    input += &#39;#include &#34;&#39; + i.name + &#39;&#34;\\n&#39;\n                self.parse(input)\n            self.write(self.args.output)\n        except:\n            print(traceback.print_exc(10), file = sys.stderr)\n            print(&#34;\\nINTERNAL PREPROCESSOR ERROR AT AROUND %s:%d, FATALLY EXITING NOW\\n&#34;\n                % (self.lastdirective.source, self.lastdirective.lineno), file = sys.stderr)\n            sys.exit(-99)\n        finally:\n            for i in self.args.inputs:\n                i.close()\n            if self.args.output != sys.stdout:\n                self.args.output.close()\n        \n        if self.args.time:\n            print(&#34;\\nTime report:&#34;)\n            print(&#34;============&#34;)\n            for n in range(0, len(self.include_times)):\n                if n == 0:\n                    print(&#34;top level: %f seconds&#34; % self.include_times[n].elapsed)\n                elif self.include_times[n].depth == 1:\n                    print(&#34;\\n %s: %f seconds (%f%%)&#34; % (self.include_times[n].included_path, self.include_times[n].elapsed, 100 * self.include_times[n].elapsed / self.include_times[0].elapsed))\n                else:\n                    print(&#34;%s%s: %f seconds&#34; % (&#39; &#39; * self.include_times[n].depth, self.include_times[n].included_path, self.include_times[n].elapsed))\n            print(&#34;\\nPragma once files (including heuristically applied):&#34;)\n            print(&#34;====================================================&#34;)\n            for i in self.include_once:\n                print(&#34; &#34;, i)\n            print()\n        if self.args.filetimes:\n            print(&#39;&#34;Total seconds&#34;,&#34;Self seconds&#34;,&#34;File size&#34;,&#34;File path&#34;&#39;, file = self.args.filetimes)\n            filetimes = 
{}\n            currentfiles = []\n            for n in range(0, len(self.include_times)):\n                while self.include_times[n].depth &lt; len(currentfiles):\n                    currentfiles.pop()\n                if self.include_times[n].depth &gt; len(currentfiles) - 1:\n                    currentfiles.append(self.include_times[n].included_abspath)\n                #print()\n                #for path in currentfiles:\n                #    print(&#34;currentfiles =&#34;, path)\n                path = currentfiles[-1]\n                if path in filetimes:\n                    filetimes[path][0] += self.include_times[n].elapsed\n                    filetimes[path][1] += self.include_times[n].elapsed\n                else:\n                    filetimes[path] = [self.include_times[n].elapsed, self.include_times[n].elapsed]\n                if self.include_times[n].elapsed &gt; 0 and len(currentfiles) &gt; 1:\n                    #print(&#34;Removing child %f from parent %s = %f&#34; % (self.include_times[n].elapsed, currentfiles[-2], filetimes[currentfiles[-2]]))\n                    filetimes[currentfiles[-2]][1] -= self.include_times[n].elapsed\n            filetimes = [(v[0],v[1],k) for k,v in filetimes.items()]\n            filetimes.sort(reverse=True)\n            for t,s,p in filetimes:\n                print((&#39;%f,%f,%d,&#34;%s&#34;&#39; % (t, s, os.stat(p).st_size, p)), file = self.args.filetimes)\n    def on_include_not_found(self,is_malformed,is_system_include,curdir,includepath):\n        if self.args.passthru_unfound_includes:\n            raise OutputDirective(Action.IgnoreAndPassThrough)\n        return super(CmdPreprocessor, self).on_include_not_found(is_malformed,is_system_include,curdir,includepath)\n\n    def on_unknown_macro_in_defined_expr(self,tok):\n        if self.args.undefines:\n            if tok.value in self.args.undefines:\n                return False\n        if self.args.passthru_undefined_exprs:\n            return None  
# Pass through as expanded as possible\n        return super(CmdPreprocessor, self).on_unknown_macro_in_defined_expr(tok)\n        \n    def on_unknown_macro_in_expr(self,ident):\n        if self.args.undefines:\n            if ident in self.args.undefines:\n                return super(CmdPreprocessor, self).on_unknown_macro_in_expr(ident)\n        if self.args.passthru_undefined_exprs:\n            return None  # Pass through as expanded as possible\n        return super(CmdPreprocessor, self).on_unknown_macro_in_expr(ident)\n        \n    def on_unknown_macro_function_in_expr(self,ident):\n        if self.args.undefines:\n            if ident in self.args.undefines:\n                return super(CmdPreprocessor, self).on_unknown_macro_function_in_expr(ident)\n        if self.args.passthru_undefined_exprs:\n            return None  # Pass through as expanded as possible\n        return super(CmdPreprocessor, self).on_unknown_macro_function_in_expr(ident)\n        \n    def on_directive_handle(self,directive,toks,ifpassthru,precedingtoks):\n        if ifpassthru:\n            if directive.value == &#39;if&#39; or directive.value == &#39;elif&#39; or directive == &#39;else&#39; or directive.value == &#39;endif&#39;:\n                self.bypass_ifpassthru = len([tok for tok in toks if tok.value == &#39;__PCPP_ALWAYS_FALSE__&#39; or tok.value == &#39;__PCPP_ALWAYS_TRUE__&#39;]) &gt; 0\n            if not self.bypass_ifpassthru and (directive.value == &#39;define&#39; or directive.value == &#39;undef&#39;):\n                if toks[0].value != self.potential_include_guard:\n                    raise OutputDirective(Action.IgnoreAndPassThrough)  # Don&#39;t execute anything with effects when inside an #if expr with undefined macro\n        if (directive.value == &#39;define&#39; or directive.value == &#39;undef&#39;) and self.args.nevers:\n            if toks[0].value in self.args.nevers:\n                raise OutputDirective(Action.IgnoreAndPassThrough)\n        if 
self.args.passthru_defines:\n            super(CmdPreprocessor, self).on_directive_handle(directive,toks,ifpassthru,precedingtoks)\n            return None  # Pass through where possible\n        return super(CmdPreprocessor, self).on_directive_handle(directive,toks,ifpassthru,precedingtoks)\n\n    def on_directive_unknown(self,directive,toks,ifpassthru,precedingtoks):\n        if ifpassthru:\n            return None  # Pass through\n        return super(CmdPreprocessor, self).on_directive_unknown(directive,toks,ifpassthru,precedingtoks)\n\n    def on_potential_include_guard(self,macro):\n        self.potential_include_guard = macro\n        return super(CmdPreprocessor, self).on_potential_include_guard(macro)\n\n    def on_comment(self,tok):\n        if self.args.passthru_comments:\n            return True  # Pass through\n        return super(CmdPreprocessor, self).on_comment(tok)\n\ndef main():\n    p = CmdPreprocessor(sys.argv)\n    sys.exit(p.return_code)\n        \nif __name__ == &#34;__main__&#34;:\n    p = CmdPreprocessor(sys.argv)\n    sys.exit(p.return_code)</code></pre>\n                </details>\n            </section>\n            <section>\n            </section>\n            <section>\n            </section>\n            <section>\n            </section>\n            <section>\n            </section>\n        </article>\n        <nav id=\"sidebar\">\n            <h1>Index</h1>\n            <div class=\"toc\">\n                <ul></ul>\n            </div>\n            <ul id=\"index\">\n                <li>\n                    <h3>Super-module</h3>\n                    <ul>\n                        <li><code><a title=\"pcpp\" href=\"index.html\">pcpp</a></code></li>\n                    </ul>\n                </li>\n            </ul>\n        </nav>\n    </main>\n    <footer id=\"footer\">\n        <p>Generated by <a href=\"https://pdoc3.github.io/pdoc\"><cite>pdoc</cite> 0.5.3</a>.</p>\n    </footer>\n    <script 
src=\"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js\"></script>\n    <script>hljs.initHighlightingOnLoad()</script>\n</body>\n\n</html>"
  },
  {
    "path": "doc/preprocessor.html",
    "content": "<!doctype html>\n<html lang=\"en\">\n\n<head>\n    <meta charset=\"utf-8\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, minimum-scale=1\" />\n    <meta name=\"generator\" content=\"pdoc 0.5.3\" />\n    <title>pcpp.preprocessor API documentation</title>\n    <meta name=\"description\" content=\"\" />\n    <link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>\n    <link href='https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/8.0.0/sanitize.min.css' rel='stylesheet'>\n    <link href=\"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/github.min.css\" rel=\"stylesheet\">\n    <style>\n        .flex {\n            display: flex !important\n        }\n\n        body {\n            line-height: 1.5em\n        }\n\n        #content {\n            padding: 20px\n        }\n\n        #sidebar {\n            padding: 30px;\n            overflow: hidden\n        }\n\n        .http-server-breadcrumbs {\n            font-size: 130%;\n            margin: 0 0 15px 0\n        }\n\n        #footer {\n            font-size: .75em;\n            padding: 5px 30px;\n            border-top: 1px solid #ddd;\n            text-align: right\n        }\n\n        #footer p {\n            margin: 0 0 0 1em;\n            display: inline-block\n        }\n\n        #footer p:last-child {\n            margin-right: 30px\n        }\n\n        h1,\n        h2,\n        h3,\n        h4,\n        h5 {\n            font-weight: 300\n        }\n\n        h1 {\n            font-size: 2.5em;\n            line-height: 1.1em\n        }\n\n        h2 {\n            font-size: 1.75em;\n            margin: 1em 0 .50em 0\n        }\n\n        h3 {\n            font-size: 1.4em;\n            margin: 25px 0 10px 0\n        }\n\n        h4 {\n            margin: 0;\n            font-size: 105%\n        }\n\n        a {\n            color: #058;\n            text-decoration: none;\n            
transition: color .3s ease-in-out\n        }\n\n        a:hover {\n            color: #e82\n        }\n\n        .title code {\n            font-weight: bold\n        }\n\n        h2[id^=\"header-\"] {\n            margin-top: 2em\n        }\n\n        .ident {\n            color: #900\n        }\n\n        pre code {\n            background: #f8f8f8;\n            font-size: .8em;\n            line-height: 1.4em\n        }\n\n        code {\n            background: #f2f2f1;\n            padding: 1px 4px;\n            overflow-wrap: break-word\n        }\n\n        h1 code {\n            background: transparent\n        }\n\n        pre {\n            background: #f8f8f8;\n            border: 0;\n            border-top: 1px solid #ccc;\n            border-bottom: 1px solid #ccc;\n            margin: 1em 0;\n            padding: 1ex\n        }\n\n        #http-server-module-list {\n            display: flex;\n            flex-flow: column\n        }\n\n        #http-server-module-list div {\n            display: flex\n        }\n\n        #http-server-module-list dt {\n            min-width: 10%\n        }\n\n        #http-server-module-list p {\n            margin-top: 0\n        }\n\n        .toc ul,\n        #index {\n            list-style-type: none;\n            margin: 0;\n            padding: 0\n        }\n\n        #index code {\n            background: transparent\n        }\n\n        #index h3 {\n            border-bottom: 1px solid #ddd\n        }\n\n        #index ul {\n            padding: 0\n        }\n\n        #index h4 {\n            font-weight: bold\n        }\n\n        #index h4+ul {\n            margin-bottom: .6em\n        }\n\n        @media (min-width:200ex) {\n            #index .two-column {\n                column-count: 2\n            }\n        }\n\n        @media (min-width:300ex) {\n            #index .two-column {\n                column-count: 3\n            }\n        }\n\n        dl {\n            margin-bottom: 2em\n        
}\n\n        dl dl:last-child {\n            margin-bottom: 4em\n        }\n\n        dd {\n            margin: 0 0 1em 3em\n        }\n\n        #header-classes+dl>dd {\n            margin-bottom: 3em\n        }\n\n        dd dd {\n            margin-left: 2em\n        }\n\n        dd p {\n            margin: 10px 0\n        }\n\n        .name {\n            background: #eee;\n            font-weight: bold;\n            font-size: .85em;\n            padding: 5px 10px;\n            display: inline-block;\n            min-width: 40%\n        }\n\n        .name:hover {\n            background: #e0e0e0\n        }\n\n        .name>span:first-child {\n            white-space: nowrap\n        }\n\n        .name.class>span:nth-child(2) {\n            margin-left: .4em\n        }\n\n        .name small {\n            font-weight: normal\n        }\n\n        .inherited {\n            color: #999;\n            border-left: 5px solid #eee;\n            padding-left: 1em\n        }\n\n        .inheritance em {\n            font-style: normal;\n            font-weight: bold\n        }\n\n        .desc h2 {\n            font-weight: 400;\n            font-size: 1.25em\n        }\n\n        .desc h3 {\n            font-size: 1em\n        }\n\n        .desc dt code {\n            background: inherit\n        }\n\n        .source summary {\n            color: #666;\n            text-align: right;\n            font-weight: 400;\n            font-size: .8em;\n            text-transform: uppercase;\n            cursor: pointer\n        }\n\n        .source pre {\n            max-height: 500px;\n            overflow: auto;\n            margin: 0\n        }\n\n        .source pre code {\n            font-size: 12px;\n            overflow: visible\n        }\n\n        .hlist {\n            list-style: none\n        }\n\n        .hlist li {\n            display: inline\n        }\n\n        .hlist li:after {\n            content: ',\\2002'\n        }\n\n        .hlist 
li:last-child:after {\n            content: none\n        }\n\n        .hlist .hlist {\n            display: inline;\n            padding-left: 1em\n        }\n\n        img {\n            max-width: 100%\n        }\n\n        .admonition {\n            padding: .1em .5em\n        }\n\n        .admonition-title {\n            font-weight: bold\n        }\n\n        .admonition.note,\n        .admonition.info,\n        .admonition.important {\n            background: #aef\n        }\n\n        .admonition.todo,\n        .admonition.versionadded,\n        .admonition.tip,\n        .admonition.hint {\n            background: #dfd\n        }\n\n        .admonition.warning,\n        .admonition.versionchanged,\n        .admonition.deprecated {\n            background: #fd4\n        }\n\n        .admonition.error,\n        .admonition.danger,\n        .admonition.caution {\n            background: lightpink\n        }\n    </style>\n    <style media=\"screen and (min-width: 700px)\">\n        @media screen and (min-width:700px) {\n            #sidebar {\n                width: 30%\n            }\n\n            #content {\n                width: 70%;\n                max-width: 100ch;\n                padding: 3em 4em;\n                border-left: 1px solid #ddd\n            }\n\n            pre code {\n                font-size: 1em\n            }\n\n            .item .name {\n                font-size: 1em\n            }\n\n            main {\n                display: flex;\n                flex-direction: row-reverse;\n                justify-content: flex-end\n            }\n\n            .toc ul ul,\n            #index ul {\n                padding-left: 1.5em\n            }\n\n            .toc>ul>li {\n                margin-top: .5em\n            }\n        }\n    </style>\n    <style media=\"print\">\n        @media print {\n            #sidebar h1 {\n                page-break-before: always\n            }\n\n            .source {\n                display: 
none\n            }\n        }\n\n        @media print {\n            * {\n                background: transparent !important;\n                color: #000 !important;\n                box-shadow: none !important;\n                text-shadow: none !important\n            }\n\n            a[href]:after {\n                content: \" (\" attr(href) \")\";\n                font-size: 90%\n            }\n\n            a[href][title]:after {\n                content: none\n            }\n\n            abbr[title]:after {\n                content: \" (\" attr(title) \")\"\n            }\n\n            .ir a:after,\n            a[href^=\"javascript:\"]:after,\n            a[href^=\"#\"]:after {\n                content: \"\"\n            }\n\n            pre,\n            blockquote {\n                border: 1px solid #999;\n                page-break-inside: avoid\n            }\n\n            thead {\n                display: table-header-group\n            }\n\n            tr,\n            img {\n                page-break-inside: avoid\n            }\n\n            img {\n                max-width: 100% !important\n            }\n\n            @page {\n                margin: 0.5cm\n            }\n\n            p,\n            h2,\n            h3 {\n                orphans: 3;\n                widows: 3\n            }\n\n            h1,\n            h2,\n            h3,\n            h4,\n            h5,\n            h6 {\n                page-break-after: avoid\n            }\n        }\n    </style>\n</head>\n\n<body>\n    <main>\n        <article id=\"content\">\n            <header>\n                <h1 class=\"title\"><code>pcpp.preprocessor</code> module</h1>\n            </header>\n            <section id=\"section-intro\">\n                <details class=\"source\">\n                    <summary>Source code</summary>\n                    <pre><code class=\"python\">#!/usr/bin/python\n# Python C99 conforming preprocessor useful for generating single include 
files\n# (C) 2017-2026 Niall Douglas http://www.nedproductions.biz/\n# and (C) 2007-2017 David Beazley http://www.dabeaz.com/\n# Started: Feb 2017\n#\n# This C preprocessor was originally written by David Beazley and the\n# original can be found at https://github.com/dabeaz/ply/blob/master/ply/cpp.py\n# This edition substantially improves on standards conforming output,\n# getting quite close to what clang or GCC outputs.\n\nfrom __future__ import generators, print_function, absolute_import, division\n\nimport sys, os, re, codecs, time, copy, traceback\nif __name__ == &#39;__main__&#39; and __package__ is None:\n    sys.path.append( os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) ) )\nfrom pcpp.parser import STRING_TYPES, default_lexer, trigraph, Macro, Action, OutputDirective, PreprocessorHooks\nfrom pcpp.evaluator import Evaluator\n\n# Some Python 3 compatibility shims\nif sys.version_info.major &lt; 3:\n    FILE_TYPES = file\n    clock = time.clock\nelse:\n    xrange = range\n    import io\n    FILE_TYPES = io.IOBase\n    clock = time.process_time\n\n__all__ = [&#39;Preprocessor&#39;, &#39;PreprocessorHooks&#39;, &#39;OutputDirective&#39;, &#39;Action&#39;, &#39;Evaluator&#39;]\n\n# ------------------------------------------------------------------\n# File inclusion timings\n#\n# Useful for figuring out how long a sequence of preprocessor inclusions actually is\n# ------------------------------------------------------------------\n\nclass FileInclusionTime(object):\n    &#34;&#34;&#34;The seconds taken to #include another file&#34;&#34;&#34;\n    def __init__(self,including_path,included_path,included_abspath,depth):\n        self.including_path = including_path\n        self.included_path = included_path\n        self.included_abspath = included_abspath\n        self.depth = depth\n        self.elapsed = 0.0\n\n# ------------------------------------------------------------------\n# Preprocessor object\n#\n# Object representing a preprocessor.  
Contains macro definitions,\n# include directories, and other information\n# ------------------------------------------------------------------\n\nclass Preprocessor(PreprocessorHooks):    \n    def __init__(self,lexer=None):\n        super(Preprocessor, self).__init__()\n        if lexer is None:\n            lexer = default_lexer()\n        self.lexer = lexer\n        self.evaluator = Evaluator(self.lexer)\n        self.macros = { }\n        self.path = []           # list of -I formal search paths for includes\n        self.temp_path = []      # list of temporary search paths for includes\n        self.rewrite_paths = [(re.escape(os.path.abspath(&#39;&#39;) + os.sep) + &#39;(.*)&#39;, &#39;\\\\1&#39;)]\n        self.passthru_includes = None\n        self.include_once = {}\n        self.include_depth = 0\n        self.include_times = []  # list of FileInclusionTime\n        self.return_code = 0\n        self.debugout = None\n        self.auto_pragma_once_enabled = True\n        self.line_directive = &#39;#line&#39;\n        self.compress = False\n        self.assume_encoding = None\n\n        # Probe the lexer for selected tokens\n        self.__lexprobe()\n\n        tm = time.localtime()\n        self.define(&#34;__DATE__ \\&#34;%s\\&#34;&#34; % time.strftime(&#34;%b %d %Y&#34;,tm))\n        self.define(&#34;__TIME__ \\&#34;%s\\&#34;&#34; % time.strftime(&#34;%H:%M:%S&#34;,tm))\n        self.define(&#34;__PCPP__ 1&#34;)\n        self.expand_linemacro = True\n        self.expand_filemacro = True\n        self.expand_countermacro = True\n        self.linemacro = 0\n        self.linemacrodepth = 0\n        self.countermacro = 0\n        self.parser = None\n\n    # -----------------------------------------------------------------------------\n    # tokenize()\n    #\n    # Utility function. 
Given a string of text, tokenize into a list of tokens\n    # -----------------------------------------------------------------------------\n\n    def tokenize(self,text):\n        &#34;&#34;&#34;Utility function. Given a string of text, tokenize into a list of tokens&#34;&#34;&#34;\n        tokens = []\n        self.lexer.input(text)\n        while True:\n            tok = self.lexer.token()\n            if not tok: break\n            tok.source = &#39;&#39;\n            tokens.append(tok)\n        return tokens\n\n    # ----------------------------------------------------------------------\n    # __lexprobe()\n    #\n    # This method probes the preprocessor lexer object to discover\n    # the token types of symbols that are important to the preprocessor.\n    # If this works right, the preprocessor will simply &#34;work&#34;\n    # with any suitable lexer regardless of how tokens have been named.\n    # ----------------------------------------------------------------------\n\n    def __lexprobe(self):\n\n        # Determine the token type for identifiers\n        self.lexer.input(&#34;identifier&#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;identifier&#34;:\n            print(&#34;Couldn&#39;t determine identifier type&#34;)\n        else:\n            self.t_ID = tok.type\n\n        # Determine the token type for integers\n        self.lexer.input(&#34;12345&#34;)\n        tok = self.lexer.token()\n        if not tok or int(tok.value) != 12345:\n            print(&#34;Couldn&#39;t determine integer type&#34;)\n        else:\n            self.t_INTEGER = tok.type\n            self.t_INTEGER_TYPE = type(tok.value)\n\n        # Determine the token type for character\n        self.lexer.input(&#34;&#39;a&#39;&#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;&#39;a&#39;&#34;:\n            print(&#34;Couldn&#39;t determine character type&#34;)\n        else:\n            self.t_CHAR = tok.type\n       
     \n        # Determine the token type for strings enclosed in double quotes\n        self.lexer.input(&#34;\\&#34;filename\\&#34;&#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;\\&#34;filename\\&#34;&#34;:\n            print(&#34;Couldn&#39;t determine string type&#34;)\n        else:\n            self.t_STRING = tok.type\n\n        # Determine the token type for whitespace--if any\n        self.lexer.input(&#34;  &#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;  &#34;:\n            self.t_SPACE = None\n        else:\n            self.t_SPACE = tok.type\n\n        # Determine the token type for newlines\n        self.lexer.input(&#34;\\n&#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;\\n&#34;:\n            self.t_NEWLINE = None\n            print(&#34;Couldn&#39;t determine token for newlines&#34;)\n        else:\n            self.t_NEWLINE = tok.type\n\n        # Determine the token type for line continuations\n        self.lexer.input(&#34;\\\\     \\n&#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;     &#34;:\n            self.t_LINECONT = None\n            print(&#34;Couldn&#39;t determine token for line continuations&#34;)\n        else:\n            self.t_LINECONT = tok.type\n\n        self.t_WS = (self.t_SPACE, self.t_NEWLINE, self.t_LINECONT)\n\n        self.lexer.input(&#34;##&#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;##&#34;:\n            print(&#34;Couldn&#39;t determine token for token pasting operator&#34;)\n        else:\n            self.t_DPOUND = tok.type\n\n        self.lexer.input(&#34;?&#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;?&#34;:\n            print(&#34;Couldn&#39;t determine token for ternary operator&#34;)\n        else:\n            self.t_TERNARY = tok.type\n\n        self.lexer.input(&#34;:&#34;)\n        tok = 
self.lexer.token()\n        if not tok or tok.value != &#34;:&#34;:\n            print(&#34;Couldn&#39;t determine token for ternary operator&#34;)\n        else:\n            self.t_COLON = tok.type\n\n        self.lexer.input(&#34;/* comment */&#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;/* comment */&#34;:\n            print(&#34;Couldn&#39;t determine comment type&#34;)\n        else:\n            self.t_COMMENT1 = tok.type\n\n        self.lexer.input(&#34;// comment&#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;// comment&#34;:\n            print(&#34;Couldn&#39;t determine comment type&#34;)\n        else:\n            self.t_COMMENT2 = tok.type\n            \n        self.t_COMMENT = (self.t_COMMENT1, self.t_COMMENT2)\n\n        # Check for other characters used by the preprocessor\n        chars = [ &#39;&lt;&#39;,&#39;&gt;&#39;,&#39;#&#39;,&#39;##&#39;,&#39;\\\\&#39;,&#39;(&#39;,&#39;)&#39;,&#39;,&#39;,&#39;.&#39;]\n        for c in chars:\n            self.lexer.input(c)\n            tok = self.lexer.token()\n            if not tok or tok.value != c:\n                print(&#34;Unable to lex &#39;%s&#39; required for preprocessor&#34; % c)\n\n    # ----------------------------------------------------------------------\n    # add_path()\n    #\n    # Adds a search path to the preprocessor.  \n    # ----------------------------------------------------------------------\n\n    def add_path(self,path):\n        &#34;&#34;&#34;Adds a search path to the preprocessor. 
&#34;&#34;&#34;\n        self.path.append(path)\n        # If the search path being added is relative, or has a common ancestor to the\n        # current working directory, add a rewrite to relativise includes from this\n        # search path\n        relpath = None\n        try:\n            relpath = os.path.relpath(path)\n        except: pass\n        if relpath is not None:\n            self.rewrite_paths += [(re.escape(os.path.abspath(path) + os.sep) + &#39;(.*)&#39;, os.path.join(relpath, &#39;\\\\1&#39;))]\n\n\n    # ----------------------------------------------------------------------\n    # group_lines()\n    #\n    # Given an input string, this function splits it into lines.  Trailing whitespace\n    # is removed. This function forms the lowest level of the preprocessor---grouping\n    # text into a line-by-line format.\n    # ----------------------------------------------------------------------\n\n    def group_lines(self,input,abssource):\n        r&#34;&#34;&#34;Given an input string, this function splits it into lines.  Trailing whitespace\n        is removed. 
This function forms the lowest level of the preprocessor---grouping\n        text into a line-by-line format.\n        &#34;&#34;&#34;\n        lex = self.lexer.clone()\n        lines = [x.rstrip() for x in input.splitlines()]\n\n        input = &#34;\\n&#34;.join(lines)\n        lex.input(input)\n        lex.lineno = 1\n\n        current_line = []\n        while True:\n            tok = lex.token()\n            if not tok:\n                break\n            tok.source = abssource\n            current_line.append(tok)\n            if tok.type in self.t_WS and tok.value == &#39;\\n&#39;:\n                yield current_line\n                current_line = []\n\n        if current_line:\n            nltok = copy.copy(current_line[-1])\n            nltok.type = self.t_NEWLINE\n            nltok.value = &#39;\\n&#39;\n            current_line.append(nltok)\n            yield current_line\n\n    # ----------------------------------------------------------------------\n    # tokenstrip()\n    # \n    # Remove leading/trailing whitespace tokens from a token list\n    # ----------------------------------------------------------------------\n\n    def tokenstrip(self,tokens):\n        &#34;&#34;&#34;Remove leading/trailing whitespace tokens from a token list&#34;&#34;&#34;\n        i = 0\n        while i &lt; len(tokens) and tokens[i].type in self.t_WS:\n            i += 1\n        del tokens[:i]\n        i = len(tokens)-1\n        while i &gt;= 0 and tokens[i].type in self.t_WS:\n            i -= 1\n        del tokens[i+1:]\n        return tokens\n\n\n    # ----------------------------------------------------------------------\n    # collect_args()\n    #\n    # Collects comma separated arguments from a list of tokens.   The arguments\n    # must be enclosed in parenthesis.  
Returns a tuple (tokencount,args,positions)\n    # where tokencount is the number of tokens consumed, args is a list of arguments,\n    # and positions is a list of integers containing the starting index of each\n    # argument.  Each argument is represented by a list of tokens.\n    #\n    # When collecting arguments, leading and trailing whitespace is removed\n    # from each argument.  \n    #\n    # This function properly handles nested parenthesis and commas---these do not\n    # define new arguments.\n    # ----------------------------------------------------------------------\n\n    def collect_args(self,tokenlist,ignore_errors=False):\n        &#34;&#34;&#34;Collects comma separated arguments from a list of tokens.   The arguments\n        must be enclosed in parenthesis.  Returns a tuple (tokencount,args,positions)\n        where tokencount is the number of tokens consumed, args is a list of arguments,\n        and positions is a list of integers containing the starting index of each\n        argument.  Each argument is represented by a list of tokens.\n        \n        When collecting arguments, leading and trailing whitespace is removed\n        from each argument.  
\n        \n        This function properly handles nested parenthesis and commas---these do not\n        define new arguments.&#34;&#34;&#34;\n        args = []\n        positions = []\n        current_arg = []\n        nesting = 1\n        tokenlen = len(tokenlist)\n    \n        # Search for the opening &#39;(&#39;.\n        i = 0\n        while (i &lt; tokenlen) and (tokenlist[i].type in self.t_WS):\n            i += 1\n\n        if (i &lt; tokenlen) and (tokenlist[i].value == &#39;(&#39;):\n            positions.append(i+1)\n        else:\n            if not ignore_errors:\n                self.on_error(tokenlist[0].source,tokenlist[0].lineno,&#34;Missing &#39;(&#39; in macro arguments&#34;)\n            return 0, [], []\n\n        i += 1\n\n        while i &lt; tokenlen:\n            t = tokenlist[i]\n            if t.value == &#39;(&#39;:\n                current_arg.append(t)\n                nesting += 1\n            elif t.value == &#39;)&#39;:\n                nesting -= 1\n                if nesting == 0:\n                    args.append(self.tokenstrip(current_arg))\n                    positions.append(i)\n                    return i+1,args,positions\n                current_arg.append(t)\n            elif t.value == &#39;,&#39; and nesting == 1:\n                args.append(self.tokenstrip(current_arg))\n                positions.append(i+1)\n                current_arg = []\n            else:\n                current_arg.append(t)\n            i += 1\n    \n        # Missing end argument\n        if not ignore_errors:\n            self.on_error(tokenlist[-1].source,tokenlist[-1].lineno,&#34;Missing &#39;)&#39; in macro arguments&#34;)\n        return 0, [],[]\n\n    # ----------------------------------------------------------------------\n    # macro_prescan()\n    #\n    # Examine the macro value (token sequence) and identify patch points\n    # This is used to speed up macro expansion later on---we&#39;ll know\n    # right away where to apply 
patches to the value to form the expansion\n    # ----------------------------------------------------------------------\n    \n    def macro_prescan(self,macro):\n        &#34;&#34;&#34;Examine the macro value (token sequence) and identify patch points\n        This is used to speed up macro expansion later on---we&#39;ll know\n        right away where to apply patches to the value to form the expansion&#34;&#34;&#34;\n        macro.patch     = []             # Standard macro arguments \n        macro.str_patch = []             # String conversion expansion\n        macro.var_comma_patch = []       # Variadic macro comma patch\n        i = 0\n        #print(&#34;BEFORE&#34;, macro.value)\n        #print(&#34;BEFORE&#34;, [x.value for x in macro.value])\n        while i &lt; len(macro.value):\n            if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:\n                argnum = macro.arglist.index(macro.value[i].value)\n                # Conversion of argument to a string\n                j = i - 1\n                while j &gt;= 0 and macro.value[j].type in self.t_WS:\n                    j -= 1\n                if j &gt;= 0 and macro.value[j].value == &#39;#&#39;:\n                    macro.value[i] = copy.copy(macro.value[i])\n                    macro.value[i].type = self.t_STRING\n                    while i &gt; j:\n                        del macro.value[j]\n                        i -= 1\n                    macro.str_patch.append((argnum,i))\n                    continue\n                # Concatenation\n                elif (i &gt; 0 and macro.value[i-1].value == &#39;##&#39;):\n                    macro.patch.append((&#39;t&#39;,argnum,i))\n                    i += 1\n                    continue\n                elif ((i+1) &lt; len(macro.value) and macro.value[i+1].value == &#39;##&#39;):\n                    macro.patch.append((&#39;t&#39;,argnum,i))\n                    i += 1\n                    continue\n              
  # Standard expansion\n                else:\n                    macro.patch.append((&#39;e&#39;,argnum,i))\n            elif macro.value[i].value == &#39;##&#39;:\n                if macro.variadic and (i &gt; 0) and (macro.value[i-1].value == &#39;,&#39;) and \\\n                        ((i+1) &lt; len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \\\n                        (macro.value[i+1].value == macro.vararg):\n                    macro.var_comma_patch.append(i-1)\n            i += 1\n        macro.patch.sort(key=lambda x: x[2],reverse=True)\n        #print(&#34;AFTER&#34;, macro.value)\n        #print(&#34;AFTER&#34;, [x.value for x in macro.value])\n        #print(macro.patch)\n\n    # ----------------------------------------------------------------------\n    # macro_expand_args()\n    #\n    # Given a Macro and list of arguments (each a token list), this method\n    # returns an expanded version of a macro.  The return value is a token sequence\n    # representing the replacement macro tokens\n    # ----------------------------------------------------------------------\n\n    def macro_expand_args(self,macro,args):\n        &#34;&#34;&#34;Given a Macro and list of arguments (each a token list), this method\n        returns an expanded version of a macro.  The return value is a token sequence\n        representing the replacement macro tokens&#34;&#34;&#34;\n        # Make a copy of the macro token sequence\n        rep = [copy.copy(_x) for _x in macro.value]\n\n        # Make string expansion patches.  
These do not alter the length of the replacement sequence\n        str_expansion = {}\n        for argnum, i in macro.str_patch:\n            if argnum not in str_expansion:\n                # Strip all non-space whitespace before stringization\n                tokens = copy.copy(args[argnum])\n                for j in xrange(len(tokens)):\n                    if tokens[j].type in self.t_WS and tokens[j].type != self.t_LINECONT:\n                        tokens[j].value = &#39; &#39;\n                # Collapse all multiple whitespace too\n                j = 0\n                while j &lt; len(tokens) - 1:\n                    if tokens[j].type in self.t_WS and tokens[j+1].type in self.t_WS:\n                        del tokens[j+1]\n                    else:\n                        j += 1\n                str = &#34;&#34;.join([x.value for x in tokens])\n                str = str.replace(&#34;\\\\&#34;,&#34;\\\\\\\\&#34;).replace(&#39;&#34;&#39;, &#39;\\\\&#34;&#39;)\n                str_expansion[argnum] = &#39;&#34;&#39; + str + &#39;&#34;&#39;\n            rep[i] = copy.copy(rep[i])\n            rep[i].value = str_expansion[argnum]\n\n        # Make the variadic macro comma patch.  If the variadic macro argument is empty, we get rid\n        comma_patch = False\n        if macro.variadic and not args[-1]:\n            for i in macro.var_comma_patch:\n                rep[i] = None\n                comma_patch = True\n\n        # Make all other patches.   The order of these matters.  It is assumed that the patch list\n        # has been sorted in reverse order of patch location since replacements will cause the\n        # size of the replacement sequence to expand from the patch point.\n        \n        expanded = { }\n        #print(&#34;***&#34;, macro)\n        #print(macro.patch)\n        for ptype, argnum, i in macro.patch:\n            #print([x.value for x in rep])\n            # Concatenation.   
Argument is left unexpanded\n            if ptype == &#39;t&#39;:\n                rep[i:i+1] = args[argnum]\n            # Normal expansion.  Argument is macro expanded first\n            elif ptype == &#39;e&#39;:\n                #print(&#39;*** Function macro arg&#39;, rep[i], &#39;replace with&#39;, args[argnum], &#39;which expands into&#39;, self.expand_macros(copy.copy(args[argnum])))\n                if argnum not in expanded:\n                    expanded[argnum] = self.expand_macros(copy.copy(args[argnum]))\n                rep[i:i+1] = expanded[argnum]\n\n        # Get rid of removed comma if necessary\n        if comma_patch:\n            rep = [_i for _i in rep if _i]\n            \n        # Do a token concatenation pass, stitching any tokens separated by ## into a single token\n        while len(rep) and rep[0].type == self.t_DPOUND:\n            del rep[0]\n        while len(rep) and rep[-1].type == self.t_DPOUND:\n            del rep[-1]\n        i = 1\n        stitched = False\n        while i &lt; len(rep) - 1:\n            if rep[i].type == self.t_DPOUND:\n                j = i + 1\n                while rep[j].type == self.t_DPOUND:\n                    j += 1\n                rep[i-1] = copy.copy(rep[i-1])\n                rep[i-1].type = None\n                rep[i-1].value += rep[j].value\n                while j &gt;= i:\n                    del rep[i]\n                    j -= 1\n                stitched = True\n            else:\n                i += 1\n        if stitched:\n            # Stitched tokens will have unknown type, so figure those out now\n            i = 0\n            lex = self.lexer.clone()\n            while i &lt; len(rep):\n                if rep[i].type is None:\n                    lex.input(rep[i].value)\n                    toks = []\n                    while True:\n                        tok = lex.token()\n                        if not tok:\n                            break\n                        
toks.append(tok)\n                    if len(toks) != 1:\n                        # Split it once again\n                        while len(toks) &gt; 1:\n                            rep.insert(i+1, copy.copy(rep[i]))\n                            rep[i+1].value = toks[-1].value\n                            rep[i+1].type = toks[-1].type\n                            toks.pop()\n                        rep[i].value = toks[0].value\n                        rep[i].type = toks[0].type\n                    else:\n                        rep[i].type = toks[0].type\n                i += 1\n\n        #print rep\n        return rep\n\n\n    # ----------------------------------------------------------------------\n    # expand_macros()\n    #\n    # Given a list of tokens, this function performs macro expansion.\n    # ----------------------------------------------------------------------\n\n    def expand_macros(self,tokens,expanding_from=[]):\n        &#34;&#34;&#34;Given a list of tokens, this function performs macro expansion.&#34;&#34;&#34;\n        # Each token needs to track from which macros it has been expanded from to prevent recursion\n        for tok in tokens:\n            if not hasattr(tok, &#39;expanded_from&#39;):\n                tok.expanded_from = []\n        i = 0\n        #print(&#34;*** EXPAND MACROS in&#34;, &#34;&#34;.join([t.value for t in tokens]), &#34;expanding_from=&#34;, expanding_from)\n        #print(tokens)\n        #print([(t.value, t.expanded_from) for t in tokens])\n        while i &lt; len(tokens):\n            t = tokens[i]\n            if self.linemacrodepth == 0:\n                self.linemacro = t.lineno\n            self.linemacrodepth = self.linemacrodepth + 1\n            if t.type == self.t_ID:\n                if t.value in self.macros and t.value not in t.expanded_from and t.value not in expanding_from:\n                    # Yes, we found a macro match\n                    m = self.macros[t.value]\n                    if 
m.arglist is None:\n                        # A simple macro\n                        rep = [copy.copy(_x) for _x in m.value]\n                        ex = self.expand_macros(rep, expanding_from + [t.value])\n                        #print(&#34;\\nExpanding macro&#34;, m, &#34;\\ninto&#34;, ex, &#34;\\nreplacing&#34;, tokens[i:i+1])\n                        for e in ex:\n                            e.source = t.source\n                            e.lineno = t.lineno\n                            if not hasattr(e, &#39;expanded_from&#39;):\n                                e.expanded_from = []\n                            e.expanded_from.append(t.value)\n                        tokens[i:i+1] = ex\n                    else:\n                        # A macro with arguments\n                        j = i + 1\n                        while j &lt; len(tokens) and (tokens[j].type in self.t_WS or tokens[j].type in self.t_COMMENT):\n                            j += 1\n                        # A function like macro without an invocation list is to be ignored\n                        if j == len(tokens) or tokens[j].value != &#39;(&#39;:\n                            i = j\n                        else:\n                            tokcount,args,positions = self.collect_args(tokens[j:], True)\n                            if tokcount == 0:\n                                # Unclosed parameter list, just bail out\n                                break\n                            if (not m.variadic\n                                # A no arg or single arg consuming macro is permitted to be expanded with nothing\n                                and (args != [[]] or len(m.arglist) &gt; 1)\n                                and len(args) !=  len(m.arglist)):\n                                self.on_error(t.source,t.lineno,&#34;Macro %s requires %d arguments but was passed %d&#34; % (t.value,len(m.arglist),len(args)))\n                                i = j + tokcount\n                 
           elif m.variadic and len(args) &lt; len(m.arglist)-1:\n                                if len(m.arglist) &gt; 2:\n                                    self.on_error(t.source,t.lineno,&#34;Macro %s must have at least %d arguments&#34; % (t.value, len(m.arglist)-1))\n                                else:\n                                    self.on_error(t.source,t.lineno,&#34;Macro %s must have at least %d argument&#34; % (t.value, len(m.arglist)-1))\n                                i = j + tokcount\n                            else:\n                                if m.variadic:\n                                    if len(args) == len(m.arglist)-1:\n                                        args.append([])\n                                    else:\n                                        args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]\n                                        del args[len(m.arglist):]\n                                else:\n                                    # If we called a single arg macro with empty, fake extend args\n                                    while len(args) &lt; len(m.arglist):\n                                        args.append([])\n                                        \n                                # Get macro replacement text\n                                rep = self.macro_expand_args(m,args)\n                                ex = self.expand_macros(rep, expanding_from + [t.value])\n                                for e in ex:\n                                    e.source = t.source\n                                    e.lineno = t.lineno\n                                    if not hasattr(e, &#39;expanded_from&#39;):\n                                        e.expanded_from = []\n                                    e.expanded_from.append(t.value)\n                                # A non-conforming extension implemented by the GCC and clang preprocessors\n                                
# is that an expansion of a macro with arguments where the following token is\n                                # an identifier inserts a space between the expansion and the identifier. This\n                                # differs from Boost.Wave incidentally (see https://github.com/ned14/pcpp/issues/29)\n                                if len(tokens) &gt; j+tokcount and tokens[j+tokcount].type in self.t_ID:\n                                    #print(&#34;*** token after expansion is&#34;, tokens[j+tokcount])\n                                    newtok = copy.copy(tokens[j+tokcount])\n                                    newtok.type = self.t_SPACE\n                                    newtok.value = &#39; &#39;\n                                    ex.append(newtok)\n                                #print(&#34;\\nExpanding macro&#34;, m, &#34;\\n\\ninto&#34;, ex, &#34;\\n\\nreplacing&#34;, tokens[i:j+tokcount])\n                                tokens[i:j+tokcount] = ex\n                    self.linemacrodepth = self.linemacrodepth - 1\n                    if self.linemacrodepth == 0:\n                        self.linemacro = 0\n                    continue\n                elif self.expand_linemacro and t.value == &#39;__LINE__&#39;:\n                    t.type = self.t_INTEGER\n                    t.value = self.t_INTEGER_TYPE(self.linemacro)\n                elif self.expand_countermacro and t.value == &#39;__COUNTER__&#39;:\n                    t.type = self.t_INTEGER\n                    t.value = self.t_INTEGER_TYPE(self.countermacro)\n                    self.countermacro += 1\n                \n            i += 1\n            self.linemacrodepth = self.linemacrodepth - 1\n            if self.linemacrodepth == 0:\n                self.linemacro = 0\n        return tokens\n\n    # ----------------------------------------------------------------------    \n    # evalexpr()\n    # \n    # Evaluate an expression token sequence for the purposes of evaluating\n    
# integral expressions.\n    # ----------------------------------------------------------------------\n\n    def evalexpr(self,tokens):\n        &#34;&#34;&#34;Evaluate an expression token sequence for the purposes of evaluating\n        integral expressions.&#34;&#34;&#34;\n        if not tokens:\n            self.on_error(&#39;unknown&#39;, 0, &#34;Empty expression&#34;)\n            return (0, None)\n        # tokens = tokenize(line)\n        # Search for defined macros\n        partial_expansion = False\n        def replace_defined(tokens):\n            i = 0\n            while i &lt; len(tokens):\n                if tokens[i].type == self.t_ID and tokens[i].value == &#39;defined&#39;:\n                    j = i + 1\n                    needparen = False\n                    result = &#34;0L&#34;\n                    while j &lt; len(tokens):\n                        if tokens[j].type in self.t_WS:\n                            j += 1\n                            continue\n                        elif tokens[j].type == self.t_ID:\n                            if tokens[j].value in self.macros:\n                                result = &#34;1L&#34;\n                            else:\n                                repl = self.on_unknown_macro_in_defined_expr(tokens[j])\n                                if repl is None:\n                                    partial_expansion = True\n                                    result = &#39;defined(&#39;+tokens[j].value+&#39;)&#39;\n                                else:\n                                    result = &#34;1L&#34; if repl else &#34;0L&#34;\n                            if not needparen: break\n                        elif tokens[j].value == &#39;(&#39;:\n                            needparen = True\n                        elif tokens[j].value == &#39;)&#39;:\n                            break\n                        else:\n                            
self.on_error(tokens[i].source,tokens[i].lineno,&#34;Malformed defined()&#34;)\n                        j += 1\n                    if result.startswith(&#39;defined&#39;):\n                        tokens[i].type = self.t_ID\n                        tokens[i].value = result\n                    else:\n                        tokens[i].type = self.t_INTEGER\n                        tokens[i].value = self.t_INTEGER_TYPE(result)\n                    del tokens[i+1:j+1]\n                i += 1\n            return tokens\n        # Replace any defined(macro) before macro expansion\n        tokens = replace_defined(tokens)\n        tokens = self.expand_macros(tokens)\n        # Replace any defined(macro) after macro expansion\n        tokens = replace_defined(tokens)\n        if not tokens:\n            return (0, None)\n        class IndirectToMacroHook(object):\n            def __init__(self, p):\n                self.__preprocessor = p\n                self.partial_expansion = False\n            def __contains__(self, key):\n                return True\n            def __getitem__(self, key):\n                if key.startswith(&#39;defined(&#39;):\n                    self.partial_expansion = True\n                    return 0\n                repl = self.__preprocessor.on_unknown_macro_in_expr(key)\n                #print(&#34;*** IndirectToMacroHook[&#34;, key, &#34;] returns&#34;, repl, file = sys.stderr)\n                if repl is None:\n                    self.partial_expansion = True\n                    return key\n                return repl\n        evalvars = IndirectToMacroHook(self)\n        class IndirectToMacroFunctionHook(object):\n            def __init__(self, p):\n                self.__preprocessor = p\n                self.partial_expansion = False\n            def __contains__(self, key):\n                return True\n            def __getitem__(self, key):\n                repl = self.__preprocessor.on_unknown_macro_function_in_expr(key)\n      
          #print(&#34;*** IndirectToMacroFunctionHook[&#34;, key, &#34;] returns&#34;, repl, file = sys.stderr)\n                if repl is None:\n                    self.partial_expansion = True\n                    return key\n                return repl\n        evalfuncts = IndirectToMacroFunctionHook(self)\n        try:\n            result = self.evaluator(tokens, functions = evalfuncts, identifiers = evalvars).value()\n            partial_expansion = partial_expansion or evalvars.partial_expansion or evalfuncts.partial_expansion\n        except OutputDirective:\n            raise\n        except Exception as e:\n            partial_expansion = partial_expansion or evalvars.partial_expansion or evalfuncts.partial_expansion\n            if not partial_expansion:\n                self.on_error(tokens[0].source,tokens[0].lineno,&#34;Could not evaluate expression due to %s (passed to evaluator: &#39;%s&#39;)&#34; % (repr(e), &#39;&#39;.join([tok.value for tok in tokens])))\n            result = 0\n        return (result, tokens) if partial_expansion else (result, None)\n\n    # ----------------------------------------------------------------------\n    # parsegen()\n    #\n    # Parse an input string\n    # ----------------------------------------------------------------------\n    def parsegen(self,input,source=None,abssource=None):\n        &#34;&#34;&#34;Parse an input string&#34;&#34;&#34;\n        rewritten_source = source\n        if abssource:\n            rewritten_source = abssource\n            for rewrite in self.rewrite_paths:\n                temp = re.sub(rewrite[0], rewrite[1], rewritten_source)\n                if temp != abssource:\n                    rewritten_source = temp\n                    if os.sep != &#39;/&#39;:\n                        rewritten_source = rewritten_source.replace(os.sep, &#39;/&#39;)\n                    break\n\n        # Replace trigraph sequences\n        t = trigraph(input)\n        lines = self.group_lines(t, 
rewritten_source)\n\n        if not source:\n            source = &#34;&#34;\n        if not rewritten_source:\n            rewritten_source = &#34;&#34;\n            \n        my_include_times_idx = len(self.include_times)\n        self.include_times.append(FileInclusionTime(self.macros[&#39;__FILE__&#39;] if &#39;__FILE__&#39; in self.macros else None, source, abssource, self.include_depth))\n        self.include_depth += 1\n        my_include_time_begin = clock()\n        if self.expand_filemacro:\n            self.define(&#34;__FILE__ \\&#34;%s\\&#34;&#34; % rewritten_source)\n\n        self.source = abssource\n        chunk = []\n        enable = True\n        iftrigger = False\n        ifpassthru = False\n        class ifstackentry(object):\n            def __init__(self,enable,iftrigger,ifpassthru,startlinetoks):\n                self.enable = enable\n                self.iftrigger = iftrigger\n                self.ifpassthru = ifpassthru\n                self.rewritten = False\n                self.startlinetoks = startlinetoks\n        ifstack = []\n        # True until any non-whitespace output or anything with effects happens.\n        at_front_of_file = True\n        # True if auto pragma once still a possibility for this #include\n        auto_pragma_once_possible = self.auto_pragma_once_enabled\n        # =(MACRO, 0) means #ifndef MACRO or #if !defined(MACRO) seen, =(MACRO,1) means #define MACRO seen\n        include_guard = None\n        self.on_potential_include_guard(None)\n\n        for x in lines:\n            all_whitespace = True\n            skip_auto_pragma_once_possible_check = False\n            # Handle comments\n            for i,tok in enumerate(x):\n                if tok.type in self.t_COMMENT:\n                    if not self.on_comment(tok):\n                        if tok.type == self.t_COMMENT1:\n                            tok.value = &#39; &#39;\n                        elif tok.type == self.t_COMMENT2:\n                          
  tok.value = &#39;\\n&#39;\n                        tok.type = &#39;CPP_WS&#39;\n            # Skip over whitespace\n            for i,tok in enumerate(x):\n                if tok.type not in self.t_WS and tok.type not in self.t_COMMENT:\n                    all_whitespace = False\n                    break\n            output_and_expand_line = True\n            output_unexpanded_line = False\n            if tok.value == &#39;#&#39;:\n                precedingtoks = [ tok ]\n                output_and_expand_line = False\n                try:\n                    # Preprocessor directive      \n                    i += 1\n                    while i &lt; len(x) and x[i].type in self.t_WS:\n                        precedingtoks.append(x[i])\n                        i += 1                    \n                    dirtokens = self.tokenstrip(x[i:])\n                    if dirtokens:\n                        name = dirtokens[0].value\n                        args = self.tokenstrip(dirtokens[1:])\n                    \n                        if self.debugout is not None:\n                            print(&#34;%d:%d:%d %s:%d #%s %s&#34; % (enable, iftrigger, ifpassthru, dirtokens[0].source, dirtokens[0].lineno, dirtokens[0].value, &#34;&#34;.join([tok.value for tok in args])), file = self.debugout)\n                            #print(ifstack)\n\n                        handling = self.on_directive_handle(dirtokens[0],args,ifpassthru,precedingtoks)\n                        assert handling == True or handling == None\n                    else:\n                        name = &#34;&#34;\n                        args = []\n                        raise OutputDirective(Action.IgnoreAndRemove)\n                        \n                    if name == &#39;define&#39;:\n                        at_front_of_file = False\n                        if enable:\n                            for tok in self.expand_macros(chunk):\n                                yield tok\n             
               chunk = []\n                            if include_guard and include_guard[1] == 0:\n                                if include_guard[0] == args[0].value and len(args) == 1:\n                                    include_guard = (args[0].value, 1)\n                                    # If ifpassthru is only turned on due to this include guard, turn it off\n                                    if ifpassthru and not ifstack[-1].ifpassthru:\n                                        ifpassthru = False\n                            self.define(args)\n                            if self.debugout is not None:\n                                print(&#34;%d:%d:%d %s:%d      %s&#34; % (enable, iftrigger, ifpassthru, dirtokens[0].source, dirtokens[0].lineno, repr(self.macros[args[0].value])), file = self.debugout)\n                            if handling is None:\n                                for tok in x:\n                                    yield tok\n                    elif name == &#39;include&#39;:\n                        if enable:\n                            for tok in self.expand_macros(chunk):\n                                yield tok\n                            chunk = []\n                            oldfile = self.macros[&#39;__FILE__&#39;] if &#39;__FILE__&#39; in self.macros else None\n                            if args and args[0].value != &#39;&lt;&#39; and args[0].type != self.t_STRING:\n                                args = self.tokenstrip(self.expand_macros(args))\n                            #print(&#39;***&#39;, &#39;&#39;.join([x.value for x in args]), file = sys.stderr)\n                            if self.passthru_includes is not None and self.passthru_includes.match(&#39;&#39;.join([x.value for x in args])):\n                                for tok in precedingtoks:\n                                    yield tok\n                                for tok in dirtokens:\n                                    yield tok\n                    
            for tok in self.include(args):\n                                    pass\n                            else:\n                                for tok in self.include(args):\n                                    yield tok\n                            if oldfile is not None:\n                                self.macros[&#39;__FILE__&#39;] = oldfile\n                            self.source = abssource\n                    elif name == &#39;undef&#39;:\n                        at_front_of_file = False\n                        if enable:\n                            for tok in self.expand_macros(chunk):\n                                yield tok\n                            chunk = []\n                            self.undef(args)\n                            if handling is None:\n                                for tok in x:\n                                    yield tok\n                    elif name == &#39;ifdef&#39;:\n                        at_front_of_file = False\n                        ifstack.append(ifstackentry(enable,iftrigger,ifpassthru,x))\n                        if enable:\n                            ifpassthru = False\n                            if not args[0].value in self.macros:\n                                res = self.on_unknown_macro_in_defined_expr(args[0])\n                                if res is None:\n                                    ifpassthru = True\n                                    ifstack[-1].rewritten = True\n                                    raise OutputDirective(Action.IgnoreAndPassThrough)\n                                elif res is True:\n                                    iftrigger = True\n                                else:\n                                    enable = False\n                                    iftrigger = False\n                            else:\n                                iftrigger = True\n                    elif name == &#39;ifndef&#39;:\n                        if not ifstack 
and at_front_of_file:\n                            self.on_potential_include_guard(args[0].value)\n                            include_guard = (args[0].value, 0)\n                        at_front_of_file = False\n                        ifstack.append(ifstackentry(enable,iftrigger,ifpassthru,x))\n                        if enable:\n                            ifpassthru = False\n                            if args[0].value in self.macros:\n                                enable = False\n                                iftrigger = False\n                            else:\n                                res = self.on_unknown_macro_in_defined_expr(args[0])\n                                if res is None:\n                                    ifpassthru = True\n                                    ifstack[-1].rewritten = True\n                                    raise OutputDirective(Action.IgnoreAndPassThrough)\n                                elif res is True:\n                                    enable = False\n                                    iftrigger = False\n                                else:\n                                    iftrigger = True\n                    elif name == &#39;if&#39;:\n                        if not ifstack and at_front_of_file:\n                            if args[0].value == &#39;!&#39; and args[1].value == &#39;defined&#39;:\n                                n = 2\n                                if args[n].value == &#39;(&#39;: n += 1\n                                self.on_potential_include_guard(args[n].value)\n                                include_guard = (args[n].value, 0)\n                        at_front_of_file = False\n                        ifstack.append(ifstackentry(enable,iftrigger,ifpassthru,x))\n                        if enable:\n                            iftrigger = False\n                            ifpassthru = False\n                            result, rewritten = self.evalexpr(args)\n                     
       if rewritten is not None:\n                                x = x[:i+2] + rewritten + [x[-1]]\n                                x[i+1] = copy.copy(x[i+1])\n                                x[i+1].type = self.t_SPACE\n                                x[i+1].value = &#39; &#39;\n                                ifpassthru = True\n                                ifstack[-1].rewritten = True\n                                raise OutputDirective(Action.IgnoreAndPassThrough)\n                            if not result:\n                                enable = False\n                            else:\n                                iftrigger = True\n                    elif name == &#39;elif&#39;:\n                        at_front_of_file = False\n                        if ifstack:\n                            if ifstack[-1].enable:     # We only pay attention if outer &#34;if&#34; allows this\n                                if enable and not ifpassthru:         # If already true, we flip enable False\n                                    enable = False\n                                elif not iftrigger:   # If False, but not triggered yet, we&#39;ll check expression\n                                    result, rewritten = self.evalexpr(args)\n                                    if rewritten is not None:\n                                        enable = True\n                                        if not ifpassthru:\n                                            # This is a passthru #elif after a False #if, so convert to an #if\n                                            x[i].value = &#39;if&#39;\n                                        x = x[:i+2] + rewritten + [x[-1]]\n                                        x[i+1] = copy.copy(x[i+1])\n                                        x[i+1].type = self.t_SPACE\n                                        x[i+1].value = &#39; &#39;\n                                        ifpassthru = True\n                                      
  ifstack[-1].rewritten = True\n                                        raise OutputDirective(Action.IgnoreAndPassThrough)\n                                    if ifpassthru:\n                                        # If this elif can only ever be true, simulate that\n                                        if result:\n                                            newtok = copy.copy(x[i+3])\n                                            newtok.type = self.t_INTEGER\n                                            newtok.value = self.t_INTEGER_TYPE(result)\n                                            x = x[:i+2] + [newtok] + [x[-1]]\n                                            raise OutputDirective(Action.IgnoreAndPassThrough)\n                                        # Otherwise elide\n                                        enable = False\n                                    elif result:\n                                        enable  = True\n                                        iftrigger = True\n                        else:\n                            self.on_error(dirtokens[0].source,dirtokens[0].lineno,&#34;Misplaced #elif&#34;)\n                            \n                    elif name == &#39;else&#39;:\n                        at_front_of_file = False\n                        if ifstack:\n                            if ifstack[-1].enable:\n                                if ifpassthru:\n                                    enable = True\n                                    raise OutputDirective(Action.IgnoreAndPassThrough)\n                                if enable:\n                                    enable = False\n                                elif not iftrigger:\n                                    enable = True\n                                    iftrigger = True\n                        else:\n                            self.on_error(dirtokens[0].source,dirtokens[0].lineno,&#34;Misplaced #else&#34;)\n\n                    elif name == 
&#39;endif&#39;:\n                        at_front_of_file = False\n                        if ifstack:\n                            oldifstackentry = ifstack.pop()\n                            enable = oldifstackentry.enable\n                            iftrigger = oldifstackentry.iftrigger\n                            ifpassthru = oldifstackentry.ifpassthru\n                            if self.debugout is not None:\n                                print(&#34;%d:%d:%d %s:%d      (%s:%d %s)&#34; % (enable, iftrigger, ifpassthru, dirtokens[0].source, dirtokens[0].lineno,\n                                    oldifstackentry.startlinetoks[0].source, oldifstackentry.startlinetoks[0].lineno, &#34;&#34;.join([n.value for n in oldifstackentry.startlinetoks])), file = self.debugout)\n                            skip_auto_pragma_once_possible_check = True\n                            if oldifstackentry.rewritten:\n                                raise OutputDirective(Action.IgnoreAndPassThrough)\n                        else:\n                            self.on_error(dirtokens[0].source,dirtokens[0].lineno,&#34;Misplaced #endif&#34;)\n                    elif name == &#39;pragma&#39; and args[0].value == &#39;once&#39;:\n                        if enable:\n                            self.include_once[self.source] = None\n                    elif enable:\n                        # Unknown preprocessor directive\n                        output_unexpanded_line = (self.on_directive_unknown(dirtokens[0], args, ifpassthru, precedingtoks) is None)\n\n                except OutputDirective as e:\n                    if e.action == Action.IgnoreAndPassThrough:\n                        output_unexpanded_line = True\n                    elif e.action == Action.IgnoreAndRemove:\n                        pass\n                    else:\n                        assert False\n\n            # If there is ever any non-whitespace output outside an include guard, auto pragma once is not 
possible\n            if not skip_auto_pragma_once_possible_check and auto_pragma_once_possible and not ifstack and not all_whitespace:\n                auto_pragma_once_possible = False\n                if self.debugout is not None:\n                    print(&#34;%d:%d:%d %s:%d Determined that #include \\&#34;%s\\&#34; is not entirely wrapped in an include guard macro, disabling auto-applying #pragma once&#34; % (enable, iftrigger, ifpassthru, x[0].source, x[0].lineno, self.source), file = self.debugout)\n                \n            if output_and_expand_line or output_unexpanded_line:\n                if not all_whitespace:\n                    at_front_of_file = False\n\n                # Normal text\n                if enable:\n                    if output_and_expand_line:\n                        chunk.extend(x)\n                    elif output_unexpanded_line:\n                        for tok in self.expand_macros(chunk):\n                            yield tok\n                        chunk = []\n                        for tok in x:\n                            yield tok\n                else:\n                    # Need to extend with the same number of blank lines\n                    i = 0\n                    while i &lt; len(x):\n                        if x[i].type not in self.t_WS:\n                            del x[i]\n                        else:\n                            i += 1\n                    chunk.extend(x)\n\n        for tok in self.expand_macros(chunk):\n            yield tok\n        chunk = []\n        for i in ifstack:\n            self.on_error(i.startlinetoks[0].source, i.startlinetoks[0].lineno, &#34;Unterminated &#34; + &#34;&#34;.join([n.value for n in i.startlinetoks]))\n        if auto_pragma_once_possible and include_guard and include_guard[1] == 1:\n            if self.debugout is not None:\n                print(&#34;%d:%d:%d %s:%d Determined that #include \\&#34;%s\\&#34; is entirely wrapped in an include guard macro 
called %s, auto-applying #pragma once&#34; % (enable, iftrigger, ifpassthru, self.source, 0, self.source, include_guard[0]), file = self.debugout)\n            self.include_once[self.source] = include_guard[0]\n        elif self.auto_pragma_once_enabled and self.source not in self.include_once:\n            if self.debugout is not None:\n                print(&#34;%d:%d:%d %s:%d Did not auto apply #pragma once to this file due to auto_pragma_once_possible=%d, include_guard=%s&#34; % (enable, iftrigger, ifpassthru, self.source, 0, auto_pragma_once_possible, repr(include_guard)), file = self.debugout)\n        my_include_time_end = clock()\n        self.include_times[my_include_times_idx].elapsed = my_include_time_end - my_include_time_begin\n        self.include_depth -= 1\n\n    # ----------------------------------------------------------------------\n    # include()\n    #\n    # Implementation of file-inclusion\n    # ----------------------------------------------------------------------\n\n    def include(self,tokens):\n        &#34;&#34;&#34;Implementation of file-inclusion&#34;&#34;&#34;\n        # Try to extract the filename and then process an include file\n        if not tokens:\n            return\n        if tokens:\n            if tokens[0].value != &#39;&lt;&#39; and tokens[0].type != self.t_STRING:\n                tokens = self.tokenstrip(self.expand_macros(tokens))\n\n            is_system_include = False\n            if tokens[0].value == &#39;&lt;&#39;:\n                is_system_include = True\n                # Include &lt;...&gt;\n                i = 1\n                while i &lt; len(tokens):\n                    if tokens[i].value == &#39;&gt;&#39;:\n                        break\n                    i += 1\n                else:\n                    self.on_error(tokens[0].source,tokens[0].lineno,&#34;Malformed #include &lt;...&gt;&#34;)\n                    return\n                filename = &#34;&#34;.join([x.value for x in tokens[1:i]])\n 
               # Search only formally specified paths\n                path = self.path\n            elif tokens[0].type == self.t_STRING:\n                filename = tokens[0].value[1:-1]\n                # Search from each nested include file, as well as formally specified paths\n                path = self.temp_path + self.path\n            else:\n                p = self.on_include_not_found(True,False,self.temp_path[0] if self.temp_path else &#39;&#39;,tokens[0].value)\n                assert p is None\n                return\n        if not path:\n            path = [&#39;&#39;]\n        while True:\n            #print path\n            for p in path:\n                iname = os.path.join(p,filename)\n                fulliname = os.path.abspath(iname)\n                if fulliname in self.include_once:\n                    if self.debugout is not None:\n                        print(&#34;x:x:x x:x #include \\&#34;%s\\&#34; skipped as already seen&#34; % (fulliname), file = self.debugout)\n                    return\n                try:\n                    ih = self.on_file_open(is_system_include,fulliname)\n                    data = ih.read()\n                    ih.close()\n                    dname = os.path.dirname(fulliname)\n                    if dname:\n                        self.temp_path.insert(0,dname)\n                    for tok in self.parsegen(data,filename,fulliname):\n                        yield tok\n                    if dname:\n                        del self.temp_path[0]\n                    return\n                except IOError:\n                    pass\n            else:\n                p = self.on_include_not_found(False,is_system_include,self.temp_path[0] if self.temp_path else &#39;&#39;,filename)\n                assert p is not None\n                path.append(p)\n\n    # ----------------------------------------------------------------------\n    # define()\n    #\n    # Define a new macro\n    # 
----------------------------------------------------------------------\n\n    def define(self,tokens):\n        &#34;&#34;&#34;Define a new macro&#34;&#34;&#34;\n        if isinstance(tokens,STRING_TYPES):\n            tokens = self.tokenize(tokens)\n        else:\n            tokens = [copy.copy(tok) for tok in tokens]\n        def add_macro(self, name, macro):\n            macro.source = name.source\n            macro.lineno = name.lineno\n            self.macros[name.value] = macro\n\n        linetok = tokens\n        try:\n            name = linetok[0]\n            if len(linetok) &gt; 1:\n                mtype = linetok[1]\n            else:\n                mtype = None\n            if not mtype:\n                m = Macro(name.value,[])\n                add_macro(self, name, m)\n            elif mtype.type in self.t_WS:\n                # A normal macro\n                m = Macro(name.value,self.tokenstrip(linetok[2:]))\n                add_macro(self, name, m)\n            elif mtype.value == &#39;(&#39;:\n                # A macro with arguments\n                tokcount, args, positions = self.collect_args(linetok[1:])\n                variadic = False\n                for a in args:\n                    if variadic:\n                        self.on_error(name.source,name.lineno,&#34;No more arguments may follow a variadic argument&#34;)\n                        break\n                    astr = &#34;&#34;.join([str(_i.value) for _i in a])\n                    if astr == &#34;...&#34;:\n                        variadic = True\n                        a[0].type = self.t_ID\n                        a[0].value = &#39;__VA_ARGS__&#39;\n                        variadic = True\n                        del a[1:]\n                        continue\n                    elif astr[-3:] == &#34;...&#34; and a[0].type == self.t_ID:\n                        variadic = True\n                        del a[1:]\n                        # If, for some reason, &#34;.&#34; is 
part of the identifier, strip off the name for the purposes\n                        # of macro expansion\n                        if a[0].value[-3:] == &#39;...&#39;:\n                            a[0].value = a[0].value[:-3]\n                        continue\n                    # Empty arguments are permitted\n                    if len(a) == 0 and len(args) == 1:\n                        continue\n                    if len(a) &gt; 1 or a[0].type != self.t_ID:\n                        self.on_error(a[0].source,a[0].lineno,&#34;Invalid macro argument&#34;)\n                        break\n                else:\n                    mvalue = self.tokenstrip(linetok[1+tokcount:])\n                    i = 0\n                    while i &lt; len(mvalue):\n                        if i+1 &lt; len(mvalue):\n                            if mvalue[i].type in self.t_WS and mvalue[i+1].value == &#39;##&#39;:\n                                del mvalue[i]\n                                continue\n                            elif mvalue[i].value == &#39;##&#39; and mvalue[i+1].type in self.t_WS:\n                                del mvalue[i+1]\n                        i += 1\n                    m = Macro(name.value,mvalue,[x[0].value for x in args] if args != [[]] else [],variadic)\n                    self.macro_prescan(m)\n                    add_macro(self, name, m)\n            else:\n                self.on_error(name.source,name.lineno,&#34;Bad macro definition&#34;)\n        #except LookupError:\n        #    print(&#34;Bad macro definition&#34;)\n        except:\n            raise\n\n    # ----------------------------------------------------------------------\n    # undef()\n    #\n    # Undefine a macro\n    # ----------------------------------------------------------------------\n\n    def undef(self,tokens):\n        &#34;&#34;&#34;Undefine a macro&#34;&#34;&#34;\n        if isinstance(tokens,STRING_TYPES):\n            tokens = self.tokenize(tokens)\n        id = 
tokens[0].value\n        try:\n            del self.macros[id]\n        except LookupError:\n            pass\n\n    # ----------------------------------------------------------------------\n    # parse()\n    #\n    # Parse input text.\n    # ----------------------------------------------------------------------\n    def parse(self,input,source=None,ignore={}):\n        &#34;&#34;&#34;Parse input text.&#34;&#34;&#34;\n        if isinstance(input, FILE_TYPES):\n            if source is None:\n                source = input.name\n            input = input.read()\n        self.ignore = ignore\n        self.parser = self.parsegen(input,source,os.path.abspath(source) if source else None)\n        if source is not None:\n            dname = os.path.dirname(source)\n            self.temp_path.insert(0,dname)\n        \n    # ----------------------------------------------------------------------\n    # token()\n    #\n    # Method to return individual tokens\n    # ----------------------------------------------------------------------\n    def token(self):\n        &#34;&#34;&#34;Method to return individual tokens&#34;&#34;&#34;\n        try:\n            while True:\n                tok = next(self.parser)\n                if tok.type not in self.ignore:\n                    return tok\n        except StopIteration:\n            self.parser = None\n            return None\n            \n    def write(self, oh=sys.stdout):\n        &#34;&#34;&#34;Calls token() repeatedly, expanding tokens to their text and writing to the file like stream oh&#34;&#34;&#34;\n        lastlineno = 0\n        lastsource = None\n        done = False\n        blanklines = 0\n        while not done:\n            emitlinedirective = False\n            toks = []\n            all_ws = True\n            # Accumulate a line\n            while not done:\n                tok = self.token()\n                if not tok:\n                    done = True\n                    break\n                
toks.append(tok)\n                if tok.value and tok.value[0] == &#39;\\n&#39;:\n                    break\n                if tok.type not in self.t_WS:\n                    all_ws = False\n            if not toks:\n                break\n            if all_ws:\n                # Remove preceding whitespace so it becomes just a LF\n                if len(toks) &gt; 1:\n                    tok = toks[-1]\n                    toks = [ tok ]\n                blanklines += toks[0].value.count(&#39;\\n&#39;)\n                continue\n            # Filter out line continuations, collapsing before and after if needs be\n            for n in xrange(len(toks)-1, -1, -1):\n                if toks[n].type in self.t_LINECONT:\n                    if n &gt; 0 and n &lt; len(toks) - 1 and toks[n-1].type in self.t_WS and toks[n+1].type in self.t_WS:\n                        toks[n-1].value = toks[n-1].value[0]\n                        del toks[n:n+2]\n                    else:\n                        del toks[n]\n            # The line in toks is not all whitespace\n            emitlinedirective = (blanklines &gt; 6) and self.line_directive is not None\n            if hasattr(toks[0], &#39;source&#39;):\n                if lastsource is None:\n                    if toks[0].source is not None:\n                        emitlinedirective = True\n                    lastsource = toks[0].source\n                elif lastsource != toks[0].source:\n                    emitlinedirective = True\n                    lastsource = toks[0].source\n            # Replace consecutive whitespace in output with a single space except at any indent\n            first_ws = None\n            #print(toks)\n            for n in xrange(len(toks)-1, -1, -1):\n                tok = toks[n]\n                if first_ws is None:\n                    if tok.type in self.t_SPACE or len(tok.value) == 0:\n                        first_ws = n\n                else:\n                    if tok.type not in 
self.t_SPACE and len(tok.value) &gt; 0:\n                        m = n + 1\n                        while m != first_ws:\n                            del toks[m]\n                            first_ws -= 1\n                        first_ws = None\n                        if self.compress &gt; 0:\n                            # Collapse a token of many whitespace into single\n                            if toks[m].value and toks[m].value[0] == &#39; &#39;:\n                                toks[m].value = &#39; &#39;\n            if not self.compress &gt; 1 and not emitlinedirective:\n                newlinesneeded = toks[0].lineno - lastlineno - 1\n                if newlinesneeded &gt; 6 and self.line_directive is not None:\n                    emitlinedirective = True\n                else:\n                    while newlinesneeded &gt; 0:\n                        oh.write(&#39;\\n&#39;)\n                        newlinesneeded -= 1\n            lastlineno = toks[0].lineno\n            # Account for those newlines in a multiline comment\n            if emitlinedirective and self.line_directive is not None:\n                oh.write(self.line_directive + &#39; &#39; + str(lastlineno) + (&#39;&#39; if lastsource is None else (&#39; &#34;&#39; + lastsource + &#39;&#34;&#39; )) + &#39;\\n&#39;)\n            for tok in toks:\n                if tok.type == self.t_COMMENT1:\n                    lastlineno += tok.value.count(&#39;\\n&#39;)\n            blanklines = 0\n            #print toks[0].lineno, \n            for tok in toks:\n                #print tok.value,\n                oh.write(tok.value)\n            #print &#39;&#39;\n\nif __name__ == &#34;__main__&#34;:\n    import doctest\n    doctest.testmod()</code></pre>\n                </details>\n            </section>\n            <section>\n            </section>\n            <section>\n            </section>\n            <section>\n            </section>\n            <section>\n                <h2 
class=\"section-title\" id=\"header-classes\">Classes</h2>\n                <dl>\n                    <dt id=\"pcpp.preprocessor.Action\"><code class=\"flex name class\">\n<span>class <span class=\"ident\">Action</span></span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <p>What kind of abort processing to do in OutputDirective</p>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">class Action(object):\n    &#34;&#34;&#34;What kind of abort processing to do in OutputDirective&#34;&#34;&#34;\n    IgnoreAndPassThrough = 0\n    &#34;&#34;&#34;Abort processing (don&#39;t execute), but pass the directive through to output&#34;&#34;&#34;\n    IgnoreAndRemove = 1\n    &#34;&#34;&#34;Abort processing (don&#39;t execute), and remove from output&#34;&#34;&#34;</code></pre>\n                        </details>\n                        <h3>Class variables</h3>\n                        <dl>\n                            <dt id=\"pcpp.preprocessor.Action.IgnoreAndPassThrough\"><code\n                                    class=\"name\">var <span class=\"ident\">IgnoreAndPassThrough</span></code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Abort processing (don't execute), but pass the directive through to output</p>\n                                </section>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.Action.IgnoreAndRemove\"><code\n                                    class=\"name\">var <span class=\"ident\">IgnoreAndRemove</span></code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Abort processing (don't execute), and remove 
from output</p>\n                                </section>\n                            </dd>\n                        </dl>\n                    </dd>\n                    <dt id=\"pcpp.preprocessor.Evaluator\"><code class=\"flex name class\">\n<span>class <span class=\"ident\">Evaluator</span></span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <p>Evaluator of #if C preprocessor expressions.</p>\n                            <pre><code>&gt;&gt;&gt; e = Evaluator()\n&gt;&gt;&gt; e('5')\nValue(5)\n&gt;&gt;&gt; e('5+6')\nValue(11)\n&gt;&gt;&gt; e('5+6*2')\nValue(17)\n&gt;&gt;&gt; e('5/2+6*2')\nValue(14)\n&gt;&gt;&gt; e('5 &lt; 6 &lt;= 7')\nValue(1)\n&gt;&gt;&gt; e('5 &lt; 6 &amp;&amp; 8 &gt; 7')\nValue(1)\n&gt;&gt;&gt; e('18446744073709551615 == -1')\nValue(1)\n&gt;&gt;&gt; e('-9223372036854775809 == 9223372036854775807')\nValue(1)\n&gt;&gt;&gt; e('-1 &lt; 0U')\nValue(0U)\n&gt;&gt;&gt; e('(( 0L &amp;&amp; 0) || (!0L &amp;&amp; !0 ))')\nValue(1)\n&gt;&gt;&gt; e('(1)?2:3')\nValue(2)\n&gt;&gt;&gt; e('(1 ? -1 : 0) &lt;= 0')\nValue(1)\n&gt;&gt;&gt; e('(1 ? -1 : 0U)')       # Output type of ? must be common between both choices\nValue(18446744073709551615U)\n&gt;&gt;&gt; e('(1 ? -1 : 0U) &lt;= 0')\nValue(0U)\n&gt;&gt;&gt; e('1 &amp;&amp; 10 / 0')         # doctest: +ELLIPSIS\nException(ZeroDivisionError('division by zero'...\n&gt;&gt;&gt; e('0 &amp;&amp; 10 / 0')         # &amp;&amp; must shortcut\nValue(0)\n&gt;&gt;&gt; e('1 ? 10 / 0 : 0')      # doctest: +ELLIPSIS\nException(ZeroDivisionError('division by zero'...\n&gt;&gt;&gt; e('0 ? 10 / 0 : 0')      # ? 
must shortcut\nValue(0)\n&gt;&gt;&gt; e('(3 ^ 5) != 6 || (3 | 5) != 7 || (3 &amp; 5) != 1')\nValue(0)\n&gt;&gt;&gt; e('1 &lt;&lt; 2 != 4 || 8 &gt;&gt; 1 != 4')\nValue(0)\n&gt;&gt;&gt; e('(2 || 3) != 1 || (2 &amp;&amp; 3) != 1 || (0 || 4) != 1 || (0 &amp;&amp; 5) != 0')\nValue(0)\n&gt;&gt;&gt; e('-1 &lt;&lt; 3U &gt; 0')\nValue(0)\n&gt;&gt;&gt; e(\"'N' == 78\")\nValue(1)\n&gt;&gt;&gt; e('0x3f == 63')\nValue(1)\n&gt;&gt;&gt; e(\"'\\\\n'\")\nValue(10)\n&gt;&gt;&gt; e(\"'\\\\\\\\'\")\nValue(92)\n&gt;&gt;&gt; e(\"'\\\\n' == 0xA\")\nValue(1)\n&gt;&gt;&gt; e(\"'\\\\\\\\' == 0x5c\")\nValue(1)\n&gt;&gt;&gt; e(\"L'\\\\0' == 0\")\nValue(1)\n&gt;&gt;&gt; e('12 == 12')\nValue(1)\n&gt;&gt;&gt; e('12L == 12')\nValue(1)\n&gt;&gt;&gt; e('-1 &gt;= 0U')\nValue(1U)\n&gt;&gt;&gt; e('(1&lt;&lt;2) == 4')\nValue(1)\n&gt;&gt;&gt; e('(-!+!9) == -1')\nValue(1)\n&gt;&gt;&gt; e('(2 || 3) == 1')\nValue(1)\n&gt;&gt;&gt; e('1L * 3 != 3')\nValue(0)\n&gt;&gt;&gt; e('(!1L != 0) || (-1L != -1)')\nValue(0)\n&gt;&gt;&gt; e('0177777 == 65535')\nValue(1)\n&gt;&gt;&gt; e('0Xffff != 65535 || 0XFfFf == 65535')\nValue(1)\n&gt;&gt;&gt; e('0L != 0 || 0l != 0')\nValue(0)\n&gt;&gt;&gt; e('1U != 1 || 1u == 1')\nValue(1)\n&gt;&gt;&gt; e('0 &lt;= -1')\nValue(0)\n&gt;&gt;&gt; e('1 &lt;&lt; 2 != 4 || 8 &gt;&gt; 1 == 4')\nValue(1)\n&gt;&gt;&gt; e('(3 ^ 5) == 6')\nValue(1)\n&gt;&gt;&gt; e('(3 | 5) == 7')\nValue(1)\n&gt;&gt;&gt; e('(3 &amp; 5) == 1')\nValue(1)\n&gt;&gt;&gt; e('(3 ^ 5) != 6 || (3 | 5) != 7 || (3 &amp; 5) != 1')\nValue(0)\n&gt;&gt;&gt; e('(0 ? 1 : 2) != 2')\nValue(0)\n&gt;&gt;&gt; e('-1 &lt;&lt; 3U &gt; 0')\nValue(0)\n&gt;&gt;&gt; e('0 &amp;&amp; 10 / 0')\nValue(0)\n&gt;&gt;&gt; e('not_defined &amp;&amp; 10 / not_defined')  # doctest: +ELLIPSIS\nException(SyntaxError('Unknown identifier not_defined'...\n&gt;&gt;&gt; e('0 &amp;&amp; 10 / 0 &gt; 1')\nValue(0)\n&gt;&gt;&gt; e('(0) ? 
10 / 0 : 0')\nValue(0)\n&gt;&gt;&gt; e('0 == 0 || 10 / 0 &gt; 1')\nValue(1)\n&gt;&gt;&gt; e('(15 &gt;&gt; 2 &gt;&gt; 1 != 1) || (3 &lt;&lt; 2 &lt;&lt; 1 != 24)')\nValue(0)\n&gt;&gt;&gt; e('(1 | 2) == 3 &amp;&amp; 4 != 5 || 0')\nValue(1)\n&gt;&gt;&gt; e('1  &gt;  0')\nValue(1)\n&gt;&gt;&gt; e(\"'S' != 83\")\nValue(0)\n&gt;&gt;&gt; e(\"'\u001b' != '\u001b'\")\nValue(0)\n&gt;&gt;&gt; e('0 + (1 - (2 + (3 - (4 + (5 - (6 + (7 - (8 + (9 - (10 + (11 - (12 +          (13 - (14 + (15 - (16 + (17 - (18 + (19 - (20 + (21 - (22 + (23 -           (24 + (25 - (26 + (27 - (28 + (29 - (30 + (31 - (32 + 0))))))))))           )))))))))))))))))))))) == 0')\nValue(1)\n&gt;&gt;&gt; e('test_function(X)', functions={'test_function':lambda x: 55})\nValue(55)\n&gt;&gt;&gt; e('test_identifier', identifiers={'test_identifier':11})\nValue(11)\n&gt;&gt;&gt; e('defined(X)', functions={'defined':lambda x: 55})\nValue(55)\n&gt;&gt;&gt; e('defined(X)')  # doctest: +ELLIPSIS\nException(SyntaxError('Unknown function defined'...\n&gt;&gt;&gt; e('__has_include(\"variant\")')  # doctest: +ELLIPSIS\nException(SyntaxError('Unknown function __has_include'...\n&gt;&gt;&gt; e('__has_include(&lt;variant&gt;)')  # doctest: +ELLIPSIS\nException(SyntaxError('Unknown function __has_include'...\n&gt;&gt;&gt; e('5  // comment')\nValue(5)\n&gt;&gt;&gt; e('5  /* comment */')\nValue(5)\n&gt;&gt;&gt; e('5  /* comment // more */')\nValue(5)\n&gt;&gt;&gt; e('5  // /* comment */')\n</code></pre>\n                            <p>Value(5)</p>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">class Evaluator(object):\n    &#34;&#34;&#34;Evaluator of #if C preprocessor expressions.\n    \n    &gt;&gt;&gt; e = Evaluator()\n    &gt;&gt;&gt; e(&#39;5&#39;)\n    Value(5)\n    &gt;&gt;&gt; e(&#39;5+6&#39;)\n    Value(11)\n    &gt;&gt;&gt; e(&#39;5+6*2&#39;)\n    Value(17)\n    
&gt;&gt;&gt; e(&#39;5/2+6*2&#39;)\n    Value(14)\n    &gt;&gt;&gt; e(&#39;5 &lt; 6 &lt;= 7&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;5 &lt; 6 &amp;&amp; 8 &gt; 7&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;18446744073709551615 == -1&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;-9223372036854775809 == 9223372036854775807&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;-1 &lt; 0U&#39;)\n    Value(0U)\n    &gt;&gt;&gt; e(&#39;(( 0L &amp;&amp; 0) || (!0L &amp;&amp; !0 ))&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(1)?2:3&#39;)\n    Value(2)\n    &gt;&gt;&gt; e(&#39;(1 ? -1 : 0) &lt;= 0&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(1 ? -1 : 0U)&#39;)       # Output type of ? must be common between both choices\n    Value(18446744073709551615U)\n    &gt;&gt;&gt; e(&#39;(1 ? -1 : 0U) &lt;= 0&#39;)\n    Value(0U)\n    &gt;&gt;&gt; e(&#39;1 &amp;&amp; 10 / 0&#39;)         # doctest: +ELLIPSIS\n    Exception(ZeroDivisionError(&#39;division by zero&#39;...\n    &gt;&gt;&gt; e(&#39;0 &amp;&amp; 10 / 0&#39;)         # &amp;&amp; must shortcut\n    Value(0)\n    &gt;&gt;&gt; e(&#39;1 ? 10 / 0 : 0&#39;)      # doctest: +ELLIPSIS\n    Exception(ZeroDivisionError(&#39;division by zero&#39;...\n    &gt;&gt;&gt; e(&#39;0 ? 10 / 0 : 0&#39;)      # ? 
must shortcut\n    Value(0)\n    &gt;&gt;&gt; e(&#39;(3 ^ 5) != 6 || (3 | 5) != 7 || (3 &amp; 5) != 1&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;1 &lt;&lt; 2 != 4 || 8 &gt;&gt; 1 != 4&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;(2 || 3) != 1 || (2 &amp;&amp; 3) != 1 || (0 || 4) != 1 || (0 &amp;&amp; 5) != 0&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;-1 &lt;&lt; 3U &gt; 0&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#34;&#39;N&#39; == 78&#34;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;0x3f == 63&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#34;&#39;\\\\\\\\n&#39;&#34;)\n    Value(10)\n    &gt;&gt;&gt; e(&#34;&#39;\\\\\\\\\\\\\\\\&#39;&#34;)\n    Value(92)\n    &gt;&gt;&gt; e(&#34;&#39;\\\\\\\\n&#39; == 0xA&#34;)\n    Value(1)\n    &gt;&gt;&gt; e(&#34;&#39;\\\\\\\\\\\\\\\\&#39; == 0x5c&#34;)\n    Value(1)\n    &gt;&gt;&gt; e(&#34;L&#39;\\\\\\\\0&#39; == 0&#34;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;12 == 12&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;12L == 12&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;-1 &gt;= 0U&#39;)\n    Value(1U)\n    &gt;&gt;&gt; e(&#39;(1&lt;&lt;2) == 4&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(-!+!9) == -1&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(2 || 3) == 1&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;1L * 3 != 3&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;(!1L != 0) || (-1L != -1)&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;0177777 == 65535&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;0Xffff != 65535 || 0XFfFf == 65535&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;0L != 0 || 0l != 0&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;1U != 1 || 1u == 1&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;0 &lt;= -1&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;1 &lt;&lt; 2 != 4 || 8 &gt;&gt; 1 == 4&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(3 ^ 5) == 6&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(3 | 5) == 7&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(3 &amp; 5) == 1&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(3 ^ 5) != 6 || (3 | 5) != 7 
|| (3 &amp; 5) != 1&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;(0 ? 1 : 2) != 2&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;-1 &lt;&lt; 3U &gt; 0&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;0 &amp;&amp; 10 / 0&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;not_defined &amp;&amp; 10 / not_defined&#39;)  # doctest: +ELLIPSIS\n    Exception(SyntaxError(&#39;Unknown identifier not_defined&#39;...\n    &gt;&gt;&gt; e(&#39;0 &amp;&amp; 10 / 0 &gt; 1&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;(0) ? 10 / 0 : 0&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;0 == 0 || 10 / 0 &gt; 1&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;(15 &gt;&gt; 2 &gt;&gt; 1 != 1) || (3 &lt;&lt; 2 &lt;&lt; 1 != 24)&#39;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;(1 | 2) == 3 &amp;&amp; 4 != 5 || 0&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;1  &gt;  0&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#34;&#39;\\123&#39; != 83&#34;)\n    Value(0)\n    &gt;&gt;&gt; e(&#34;&#39;\\x1b&#39; != &#39;\\033&#39;&#34;)\n    Value(0)\n    &gt;&gt;&gt; e(&#39;0 + (1 - (2 + (3 - (4 + (5 - (6 + (7 - (8 + (9 - (10 + (11 - (12 +          (13 - (14 + (15 - (16 + (17 - (18 + (19 - (20 + (21 - (22 + (23 -           (24 + (25 - (26 + (27 - (28 + (29 - (30 + (31 - (32 + 0))))))))))           )))))))))))))))))))))) == 0&#39;)\n    Value(1)\n    &gt;&gt;&gt; e(&#39;test_function(X)&#39;, functions={&#39;test_function&#39;:lambda x: 55})\n    Value(55)\n    &gt;&gt;&gt; e(&#39;test_identifier&#39;, identifiers={&#39;test_identifier&#39;:11})\n    Value(11)\n    &gt;&gt;&gt; e(&#39;defined(X)&#39;, functions={&#39;defined&#39;:lambda x: 55})\n    Value(55)\n    &gt;&gt;&gt; e(&#39;defined(X)&#39;)  # doctest: +ELLIPSIS\n    Exception(SyntaxError(&#39;Unknown function defined&#39;...\n    &gt;&gt;&gt; e(&#39;__has_include(&#34;variant&#34;)&#39;)  # doctest: +ELLIPSIS\n    Exception(SyntaxError(&#39;Unknown function __has_include&#39;...\n    &gt;&gt;&gt; e(&#39;__has_include(&lt;variant&gt;)&#39;)  # doctest: +ELLIPSIS\n    
Exception(SyntaxError(&#39;Unknown function __has_include&#39;...\n    &gt;&gt;&gt; e(&#39;5  // comment&#39;)\n    Value(5)\n    &gt;&gt;&gt; e(&#39;5  /* comment */&#39;)\n    Value(5)\n    &gt;&gt;&gt; e(&#39;5  /* comment // more */&#39;)\n    Value(5)\n    &gt;&gt;&gt; e(&#39;5  // /* comment */&#39;)\n    Value(5)\n    &#34;&#34;&#34;\n#    &gt;&gt;&gt; e(&#39;defined X&#39;, functions={&#39;defined&#39;:lambda x: 55})\n#    Value(55)\n\n    def __init__(self, lexer = None):\n        self.lexer = lexer if lexer is not None else default_lexer()\n        self.parser = yacc.yacc(optimize=in_production,debug=not in_production,write_tables=not in_production)\n\n    class __lexer(object):\n\n        def __init__(self, functions, identifiers):\n            self.__toks = []\n            self.__functions = functions\n            self.__identifiers = identifiers\n\n        def input(self, toks):\n            self.__toks = [tok for tok in toks if tok.type != &#39;CPP_WS&#39; and tok.type != &#39;CPP_LINECONT&#39; and tok.type != &#39;CPP_COMMENT1&#39; and tok.type != &#39;CPP_COMMENT2&#39;]\n            self.__idx = 0\n\n        def token(self):\n            if self.__idx &gt;= len(self.__toks):\n                return None\n            self.__idx = self.__idx + 1\n            return self.__toks[self.__idx - 1]\n\n        def on_function_call(self, p):\n            if p[1] not in self.__functions:\n                raise SyntaxError(&#39;Unknown function %s&#39; % p[1])\n            p[0] = Value(self.__functions[p[1]](p[3]))\n\n        def on_identifier(self, p):\n            if p[1] not in self.__identifiers:\n                raise SyntaxError(&#39;Unknown identifier %s&#39; % p[1])\n            p[0] = Value(self.__identifiers[p[1]])\n            \n    def __call__(self, input, functions = {}, identifiers = {}):\n        &#34;&#34;&#34;Execute a fully macro expanded set of tokens representing an expression,\n        returning the result of the evaluation.\n        
&#34;&#34;&#34;\n        if not isinstance(input,list):\n            self.lexer.input(input)\n            input = []\n            while True:\n                tok = self.lexer.token()\n                if not tok:\n                    break\n                input.append(tok)\n        return self.parser.parse(input, lexer = self.__lexer(functions, identifiers))</code></pre>\n                        </details>\n                        <h3>Methods</h3>\n                        <dl>\n                            <dt id=\"pcpp.preprocessor.Evaluator.__init__\"><code class=\"name flex\">\n<span>def <span class=\"ident\">__init__</span></span>(<span>self, lexer=None)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Initialize self.\n                                        See help(type(self)) for accurate signature.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def __init__(self, lexer = None):\n    self.lexer = lexer if lexer is not None else default_lexer()\n    self.parser = yacc.yacc(optimize=in_production,debug=not in_production,write_tables=not in_production)</code></pre>\n                                </details>\n                            </dd>\n                        </dl>\n                    </dd>\n                    <dt id=\"pcpp.preprocessor.OutputDirective\"><code class=\"flex name class\">\n<span>class <span class=\"ident\">OutputDirective</span></span>\n<span>(</span><span><small>ancestors:</small> builtins.Exception, builtins.BaseException)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <p>Raise this exception to abort processing of a preprocessor directive and\n                    
            to instead output it as is into the output</p>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">class OutputDirective(Exception):\n    &#34;&#34;&#34;Raise this exception to abort processing of a preprocessor directive and\n    to instead output it as is into the output&#34;&#34;&#34;\n    def __init__(self, action):\n        self.action = action</code></pre>\n                        </details>\n                        <h3>Methods</h3>\n                        <dl>\n                            <dt id=\"pcpp.preprocessor.OutputDirective.__init__\"><code class=\"name flex\">\n<span>def <span class=\"ident\">__init__</span></span>(<span>self, action)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Initialize self.\n                                        See help(type(self)) for accurate signature.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def __init__(self, action):\n    self.action = action</code></pre>\n                                </details>\n                            </dd>\n                        </dl>\n                    </dd>\n                    <dt id=\"pcpp.preprocessor.Preprocessor\"><code class=\"flex name class\">\n<span>class <span class=\"ident\">Preprocessor</span></span>\n<span>(</span><span><small>ancestors:</small> <a title=\"pcpp.parser.PreprocessorHooks\" href=\"parser.html#pcpp.parser.PreprocessorHooks\">PreprocessorHooks</a>)</span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <p>Override these in your 
subclass of Preprocessor to customise preprocessing</p>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">class Preprocessor(PreprocessorHooks):    \n    def __init__(self,lexer=None):\n        super(Preprocessor, self).__init__()\n        if lexer is None:\n            lexer = default_lexer()\n        self.lexer = lexer\n        self.evaluator = Evaluator(self.lexer)\n        self.macros = { }\n        self.path = []           # list of -I formal search paths for includes\n        self.temp_path = []      # list of temporary search paths for includes\n        self.rewrite_paths = [(re.escape(os.path.abspath(&#39;&#39;) + os.sep) + &#39;(.*)&#39;, &#39;\\\\1&#39;)]\n        self.passthru_includes = None\n        self.include_once = {}\n        self.include_depth = 0\n        self.include_times = []  # list of FileInclusionTime\n        self.return_code = 0\n        self.debugout = None\n        self.auto_pragma_once_enabled = True\n        self.line_directive = &#39;#line&#39;\n        self.compress = False\n        self.assume_encoding = None\n\n        # Probe the lexer for selected tokens\n        self.__lexprobe()\n\n        tm = time.localtime()\n        self.define(&#34;__DATE__ \\&#34;%s\\&#34;&#34; % time.strftime(&#34;%b %d %Y&#34;,tm))\n        self.define(&#34;__TIME__ \\&#34;%s\\&#34;&#34; % time.strftime(&#34;%H:%M:%S&#34;,tm))\n        self.define(&#34;__PCPP__ 1&#34;)\n        self.expand_linemacro = True\n        self.expand_filemacro = True\n        self.expand_countermacro = True\n        self.linemacro = 0\n        self.linemacrodepth = 0\n        self.countermacro = 0\n        self.parser = None\n\n    # -----------------------------------------------------------------------------\n    # tokenize()\n    #\n    # Utility function. 
Given a string of text, tokenize into a list of tokens\n    # -----------------------------------------------------------------------------\n\n    def tokenize(self,text):\n        &#34;&#34;&#34;Utility function. Given a string of text, tokenize into a list of tokens&#34;&#34;&#34;\n        tokens = []\n        self.lexer.input(text)\n        while True:\n            tok = self.lexer.token()\n            if not tok: break\n            tok.source = &#39;&#39;\n            tokens.append(tok)\n        return tokens\n\n    # ----------------------------------------------------------------------\n    # __lexprobe()\n    #\n    # This method probes the preprocessor lexer object to discover\n    # the token types of symbols that are important to the preprocessor.\n    # If this works right, the preprocessor will simply &#34;work&#34;\n    # with any suitable lexer regardless of how tokens have been named.\n    # ----------------------------------------------------------------------\n\n    def __lexprobe(self):\n\n        # Determine the token type for identifiers\n        self.lexer.input(&#34;identifier&#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;identifier&#34;:\n            print(&#34;Couldn&#39;t determine identifier type&#34;)\n        else:\n            self.t_ID = tok.type\n\n        # Determine the token type for integers\n        self.lexer.input(&#34;12345&#34;)\n        tok = self.lexer.token()\n        if not tok or int(tok.value) != 12345:\n            print(&#34;Couldn&#39;t determine integer type&#34;)\n        else:\n            self.t_INTEGER = tok.type\n            self.t_INTEGER_TYPE = type(tok.value)\n\n        # Determine the token type for character\n        self.lexer.input(&#34;&#39;a&#39;&#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;&#39;a&#39;&#34;:\n            print(&#34;Couldn&#39;t determine character type&#34;)\n        else:\n            self.t_CHAR = tok.type\n       
     \n        # Determine the token type for strings enclosed in double quotes\n        self.lexer.input(&#34;\\&#34;filename\\&#34;&#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;\\&#34;filename\\&#34;&#34;:\n            print(&#34;Couldn&#39;t determine string type&#34;)\n        else:\n            self.t_STRING = tok.type\n\n        # Determine the token type for whitespace--if any\n        self.lexer.input(&#34;  &#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;  &#34;:\n            self.t_SPACE = None\n        else:\n            self.t_SPACE = tok.type\n\n        # Determine the token type for newlines\n        self.lexer.input(&#34;\\n&#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;\\n&#34;:\n            self.t_NEWLINE = None\n            print(&#34;Couldn&#39;t determine token for newlines&#34;)\n        else:\n            self.t_NEWLINE = tok.type\n\n        # Determine the token type for line continuations\n        self.lexer.input(&#34;\\\\     \\n&#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;     &#34;:\n            self.t_LINECONT = None\n            print(&#34;Couldn&#39;t determine token for line continuations&#34;)\n        else:\n            self.t_LINECONT = tok.type\n\n        self.t_WS = (self.t_SPACE, self.t_NEWLINE, self.t_LINECONT)\n\n        self.lexer.input(&#34;##&#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;##&#34;:\n            print(&#34;Couldn&#39;t determine token for token pasting operator&#34;)\n        else:\n            self.t_DPOUND = tok.type\n\n        self.lexer.input(&#34;?&#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;?&#34;:\n            print(&#34;Couldn&#39;t determine token for ternary operator&#34;)\n        else:\n            self.t_TERNARY = tok.type\n\n        self.lexer.input(&#34;:&#34;)\n        tok = 
self.lexer.token()\n        if not tok or tok.value != &#34;:&#34;:\n            print(&#34;Couldn&#39;t determine token for ternary operator&#34;)\n        else:\n            self.t_COLON = tok.type\n\n        self.lexer.input(&#34;/* comment */&#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;/* comment */&#34;:\n            print(&#34;Couldn&#39;t determine comment type&#34;)\n        else:\n            self.t_COMMENT1 = tok.type\n\n        self.lexer.input(&#34;// comment&#34;)\n        tok = self.lexer.token()\n        if not tok or tok.value != &#34;// comment&#34;:\n            print(&#34;Couldn&#39;t determine comment type&#34;)\n        else:\n            self.t_COMMENT2 = tok.type\n            \n        self.t_COMMENT = (self.t_COMMENT1, self.t_COMMENT2)\n\n        # Check for other characters used by the preprocessor\n        chars = [ &#39;&lt;&#39;,&#39;&gt;&#39;,&#39;#&#39;,&#39;##&#39;,&#39;\\\\&#39;,&#39;(&#39;,&#39;)&#39;,&#39;,&#39;,&#39;.&#39;]\n        for c in chars:\n            self.lexer.input(c)\n            tok = self.lexer.token()\n            if not tok or tok.value != c:\n                print(&#34;Unable to lex &#39;%s&#39; required for preprocessor&#34; % c)\n\n    # ----------------------------------------------------------------------\n    # add_path()\n    #\n    # Adds a search path to the preprocessor.  \n    # ----------------------------------------------------------------------\n\n    def add_path(self,path):\n        &#34;&#34;&#34;Adds a search path to the preprocessor. 
&#34;&#34;&#34;\n        self.path.append(path)\n        # If the search path being added is relative, or has a common ancestor to the\n        # current working directory, add a rewrite to relativise includes from this\n        # search path\n        relpath = None\n        try:\n            relpath = os.path.relpath(path)\n        except: pass\n        if relpath is not None:\n            self.rewrite_paths += [(re.escape(os.path.abspath(path) + os.sep) + &#39;(.*)&#39;, os.path.join(relpath, &#39;\\\\1&#39;))]\n\n\n    # ----------------------------------------------------------------------\n    # group_lines()\n    #\n    # Given an input string, this function splits it into lines.  Trailing whitespace\n    # is removed. This function forms the lowest level of the preprocessor---grouping\n    # text into a line-by-line format.\n    # ----------------------------------------------------------------------\n\n    def group_lines(self,input,abssource):\n        r&#34;&#34;&#34;Given an input string, this function splits it into lines.  Trailing whitespace\n        is removed. 
This function forms the lowest level of the preprocessor---grouping\n        text into a line-by-line format.\n        &#34;&#34;&#34;\n        lex = self.lexer.clone()\n        lines = [x.rstrip() for x in input.splitlines()]\n\n        input = &#34;\\n&#34;.join(lines)\n        lex.input(input)\n        lex.lineno = 1\n\n        current_line = []\n        while True:\n            tok = lex.token()\n            if not tok:\n                break\n            tok.source = abssource\n            current_line.append(tok)\n            if tok.type in self.t_WS and tok.value == &#39;\\n&#39;:\n                yield current_line\n                current_line = []\n\n        if current_line:\n            nltok = copy.copy(current_line[-1])\n            nltok.type = self.t_NEWLINE\n            nltok.value = &#39;\\n&#39;\n            current_line.append(nltok)\n            yield current_line\n\n    # ----------------------------------------------------------------------\n    # tokenstrip()\n    # \n    # Remove leading/trailing whitespace tokens from a token list\n    # ----------------------------------------------------------------------\n\n    def tokenstrip(self,tokens):\n        &#34;&#34;&#34;Remove leading/trailing whitespace tokens from a token list&#34;&#34;&#34;\n        i = 0\n        while i &lt; len(tokens) and tokens[i].type in self.t_WS:\n            i += 1\n        del tokens[:i]\n        i = len(tokens)-1\n        while i &gt;= 0 and tokens[i].type in self.t_WS:\n            i -= 1\n        del tokens[i+1:]\n        return tokens\n\n\n    # ----------------------------------------------------------------------\n    # collect_args()\n    #\n    # Collects comma separated arguments from a list of tokens.   The arguments\n    # must be enclosed in parenthesis.  
Returns a tuple (tokencount,args,positions)\n    # where tokencount is the number of tokens consumed, args is a list of arguments,\n    # and positions is a list of integers containing the starting index of each\n    # argument.  Each argument is represented by a list of tokens.\n    #\n    # When collecting arguments, leading and trailing whitespace is removed\n    # from each argument.  \n    #\n    # This function properly handles nested parenthesis and commas---these do not\n    # define new arguments.\n    # ----------------------------------------------------------------------\n\n    def collect_args(self,tokenlist,ignore_errors=False):\n        &#34;&#34;&#34;Collects comma separated arguments from a list of tokens.   The arguments\n        must be enclosed in parenthesis.  Returns a tuple (tokencount,args,positions)\n        where tokencount is the number of tokens consumed, args is a list of arguments,\n        and positions is a list of integers containing the starting index of each\n        argument.  Each argument is represented by a list of tokens.\n        \n        When collecting arguments, leading and trailing whitespace is removed\n        from each argument.  
\n        \n        This function properly handles nested parenthesis and commas---these do not\n        define new arguments.&#34;&#34;&#34;\n        args = []\n        positions = []\n        current_arg = []\n        nesting = 1\n        tokenlen = len(tokenlist)\n    \n        # Search for the opening &#39;(&#39;.\n        i = 0\n        while (i &lt; tokenlen) and (tokenlist[i].type in self.t_WS):\n            i += 1\n\n        if (i &lt; tokenlen) and (tokenlist[i].value == &#39;(&#39;):\n            positions.append(i+1)\n        else:\n            if not ignore_errors:\n                self.on_error(tokenlist[0].source,tokenlist[0].lineno,&#34;Missing &#39;(&#39; in macro arguments&#34;)\n            return 0, [], []\n\n        i += 1\n\n        while i &lt; tokenlen:\n            t = tokenlist[i]\n            if t.value == &#39;(&#39;:\n                current_arg.append(t)\n                nesting += 1\n            elif t.value == &#39;)&#39;:\n                nesting -= 1\n                if nesting == 0:\n                    args.append(self.tokenstrip(current_arg))\n                    positions.append(i)\n                    return i+1,args,positions\n                current_arg.append(t)\n            elif t.value == &#39;,&#39; and nesting == 1:\n                args.append(self.tokenstrip(current_arg))\n                positions.append(i+1)\n                current_arg = []\n            else:\n                current_arg.append(t)\n            i += 1\n    \n        # Missing end argument\n        if not ignore_errors:\n            self.on_error(tokenlist[-1].source,tokenlist[-1].lineno,&#34;Missing &#39;)&#39; in macro arguments&#34;)\n        return 0, [],[]\n\n    # ----------------------------------------------------------------------\n    # macro_prescan()\n    #\n    # Examine the macro value (token sequence) and identify patch points\n    # This is used to speed up macro expansion later on---we&#39;ll know\n    # right away where to apply 
patches to the value to form the expansion\n    # ----------------------------------------------------------------------\n    \n    def macro_prescan(self,macro):\n        &#34;&#34;&#34;Examine the macro value (token sequence) and identify patch points\n        This is used to speed up macro expansion later on---we&#39;ll know\n        right away where to apply patches to the value to form the expansion&#34;&#34;&#34;\n        macro.patch     = []             # Standard macro arguments \n        macro.str_patch = []             # String conversion expansion\n        macro.var_comma_patch = []       # Variadic macro comma patch\n        i = 0\n        #print(&#34;BEFORE&#34;, macro.value)\n        #print(&#34;BEFORE&#34;, [x.value for x in macro.value])\n        while i &lt; len(macro.value):\n            if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:\n                argnum = macro.arglist.index(macro.value[i].value)\n                # Conversion of argument to a string\n                j = i - 1\n                while j &gt;= 0 and macro.value[j].type in self.t_WS:\n                    j -= 1\n                if j &gt;= 0 and macro.value[j].value == &#39;#&#39;:\n                    macro.value[i] = copy.copy(macro.value[i])\n                    macro.value[i].type = self.t_STRING\n                    while i &gt; j:\n                        del macro.value[j]\n                        i -= 1\n                    macro.str_patch.append((argnum,i))\n                    continue\n                # Concatenation\n                elif (i &gt; 0 and macro.value[i-1].value == &#39;##&#39;):\n                    macro.patch.append((&#39;t&#39;,argnum,i))\n                    i += 1\n                    continue\n                elif ((i+1) &lt; len(macro.value) and macro.value[i+1].value == &#39;##&#39;):\n                    macro.patch.append((&#39;t&#39;,argnum,i))\n                    i += 1\n                    continue\n              
  # Standard expansion\n                else:\n                    macro.patch.append((&#39;e&#39;,argnum,i))\n            elif macro.value[i].value == &#39;##&#39;:\n                if macro.variadic and (i &gt; 0) and (macro.value[i-1].value == &#39;,&#39;) and \\\n                        ((i+1) &lt; len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \\\n                        (macro.value[i+1].value == macro.vararg):\n                    macro.var_comma_patch.append(i-1)\n            i += 1\n        macro.patch.sort(key=lambda x: x[2],reverse=True)\n        #print(&#34;AFTER&#34;, macro.value)\n        #print(&#34;AFTER&#34;, [x.value for x in macro.value])\n        #print(macro.patch)\n\n    # ----------------------------------------------------------------------\n    # macro_expand_args()\n    #\n    # Given a Macro and list of arguments (each a token list), this method\n    # returns an expanded version of a macro.  The return value is a token sequence\n    # representing the replacement macro tokens\n    # ----------------------------------------------------------------------\n\n    def macro_expand_args(self,macro,args):\n        &#34;&#34;&#34;Given a Macro and list of arguments (each a token list), this method\n        returns an expanded version of a macro.  The return value is a token sequence\n        representing the replacement macro tokens&#34;&#34;&#34;\n        # Make a copy of the macro token sequence\n        rep = [copy.copy(_x) for _x in macro.value]\n\n        # Make string expansion patches.  
These do not alter the length of the replacement sequence\n        str_expansion = {}\n        for argnum, i in macro.str_patch:\n            if argnum not in str_expansion:\n                # Strip all non-space whitespace before stringization\n                tokens = copy.copy(args[argnum])\n                for j in xrange(len(tokens)):\n                    if tokens[j].type in self.t_WS and tokens[j].type != self.t_LINECONT:\n                        tokens[j].value = &#39; &#39;\n                # Collapse all multiple whitespace too\n                j = 0\n                while j &lt; len(tokens) - 1:\n                    if tokens[j].type in self.t_WS and tokens[j+1].type in self.t_WS:\n                        del tokens[j+1]\n                    else:\n                        j += 1\n                str = &#34;&#34;.join([x.value for x in tokens])\n                str = str.replace(&#34;\\\\&#34;,&#34;\\\\\\\\&#34;).replace(&#39;&#34;&#39;, &#39;\\\\&#34;&#39;)\n                str_expansion[argnum] = &#39;&#34;&#39; + str + &#39;&#34;&#39;\n            rep[i] = copy.copy(rep[i])\n            rep[i].value = str_expansion[argnum]\n\n        # Make the variadic macro comma patch.  If the variadic macro argument is empty, we get rid\n        comma_patch = False\n        if macro.variadic and not args[-1]:\n            for i in macro.var_comma_patch:\n                rep[i] = None\n                comma_patch = True\n\n        # Make all other patches.   The order of these matters.  It is assumed that the patch list\n        # has been sorted in reverse order of patch location since replacements will cause the\n        # size of the replacement sequence to expand from the patch point.\n        \n        expanded = { }\n        #print(&#34;***&#34;, macro)\n        #print(macro.patch)\n        for ptype, argnum, i in macro.patch:\n            #print([x.value for x in rep])\n            # Concatenation.   
Argument is left unexpanded\n            if ptype == &#39;t&#39;:\n                rep[i:i+1] = args[argnum]\n            # Normal expansion.  Argument is macro expanded first\n            elif ptype == &#39;e&#39;:\n                #print(&#39;*** Function macro arg&#39;, rep[i], &#39;replace with&#39;, args[argnum], &#39;which expands into&#39;, self.expand_macros(copy.copy(args[argnum])))\n                if argnum not in expanded:\n                    expanded[argnum] = self.expand_macros(copy.copy(args[argnum]))\n                rep[i:i+1] = expanded[argnum]\n\n        # Get rid of removed comma if necessary\n        if comma_patch:\n            rep = [_i for _i in rep if _i]\n            \n        # Do a token concatenation pass, stitching any tokens separated by ## into a single token\n        while len(rep) and rep[0].type == self.t_DPOUND:\n            del rep[0]\n        while len(rep) and rep[-1].type == self.t_DPOUND:\n            del rep[-1]\n        i = 1\n        stitched = False\n        while i &lt; len(rep) - 1:\n            if rep[i].type == self.t_DPOUND:\n                j = i + 1\n                while rep[j].type == self.t_DPOUND:\n                    j += 1\n                rep[i-1] = copy.copy(rep[i-1])\n                rep[i-1].type = None\n                rep[i-1].value += rep[j].value\n                while j &gt;= i:\n                    del rep[i]\n                    j -= 1\n                stitched = True\n            else:\n                i += 1\n        if stitched:\n            # Stitched tokens will have unknown type, so figure those out now\n            i = 0\n            lex = self.lexer.clone()\n            while i &lt; len(rep):\n                if rep[i].type is None:\n                    lex.input(rep[i].value)\n                    toks = []\n                    while True:\n                        tok = lex.token()\n                        if not tok:\n                            break\n                        
toks.append(tok)\n                    if len(toks) != 1:\n                        # Split it once again\n                        while len(toks) &gt; 1:\n                            rep.insert(i+1, copy.copy(rep[i]))\n                            rep[i+1].value = toks[-1].value\n                            rep[i+1].type = toks[-1].type\n                            toks.pop()\n                        rep[i].value = toks[0].value\n                        rep[i].type = toks[0].type\n                    else:\n                        rep[i].type = toks[0].type\n                i += 1\n\n        #print rep\n        return rep\n\n\n    # ----------------------------------------------------------------------\n    # expand_macros()\n    #\n    # Given a list of tokens, this function performs macro expansion.\n    # ----------------------------------------------------------------------\n\n    def expand_macros(self,tokens,expanding_from=[]):\n        &#34;&#34;&#34;Given a list of tokens, this function performs macro expansion.&#34;&#34;&#34;\n        # Each token needs to track from which macros it has been expanded from to prevent recursion\n        for tok in tokens:\n            if not hasattr(tok, &#39;expanded_from&#39;):\n                tok.expanded_from = []\n        i = 0\n        #print(&#34;*** EXPAND MACROS in&#34;, &#34;&#34;.join([t.value for t in tokens]), &#34;expanding_from=&#34;, expanding_from)\n        #print(tokens)\n        #print([(t.value, t.expanded_from) for t in tokens])\n        while i &lt; len(tokens):\n            t = tokens[i]\n            if self.linemacrodepth == 0:\n                self.linemacro = t.lineno\n            self.linemacrodepth = self.linemacrodepth + 1\n            if t.type == self.t_ID:\n                if t.value in self.macros and t.value not in t.expanded_from and t.value not in expanding_from:\n                    # Yes, we found a macro match\n                    m = self.macros[t.value]\n                    if 
m.arglist is None:\n                        # A simple macro\n                        rep = [copy.copy(_x) for _x in m.value]\n                        ex = self.expand_macros(rep, expanding_from + [t.value])\n                        #print(&#34;\\nExpanding macro&#34;, m, &#34;\\ninto&#34;, ex, &#34;\\nreplacing&#34;, tokens[i:i+1])\n                        for e in ex:\n                            e.source = t.source\n                            e.lineno = t.lineno\n                            if not hasattr(e, &#39;expanded_from&#39;):\n                                e.expanded_from = []\n                            e.expanded_from.append(t.value)\n                        tokens[i:i+1] = ex\n                    else:\n                        # A macro with arguments\n                        j = i + 1\n                        while j &lt; len(tokens) and (tokens[j].type in self.t_WS or tokens[j].type in self.t_COMMENT):\n                            j += 1\n                        # A function like macro without an invocation list is to be ignored\n                        if j == len(tokens) or tokens[j].value != &#39;(&#39;:\n                            i = j\n                        else:\n                            tokcount,args,positions = self.collect_args(tokens[j:], True)\n                            if tokcount == 0:\n                                # Unclosed parameter list, just bail out\n                                break\n                            if (not m.variadic\n                                # A no arg or single arg consuming macro is permitted to be expanded with nothing\n                                and (args != [[]] or len(m.arglist) &gt; 1)\n                                and len(args) !=  len(m.arglist)):\n                                self.on_error(t.source,t.lineno,&#34;Macro %s requires %d arguments but was passed %d&#34; % (t.value,len(m.arglist),len(args)))\n                                i = j + tokcount\n                 
           elif m.variadic and len(args) &lt; len(m.arglist)-1:\n                                if len(m.arglist) &gt; 2:\n                                    self.on_error(t.source,t.lineno,&#34;Macro %s must have at least %d arguments&#34; % (t.value, len(m.arglist)-1))\n                                else:\n                                    self.on_error(t.source,t.lineno,&#34;Macro %s must have at least %d argument&#34; % (t.value, len(m.arglist)-1))\n                                i = j + tokcount\n                            else:\n                                if m.variadic:\n                                    if len(args) == len(m.arglist)-1:\n                                        args.append([])\n                                    else:\n                                        args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]\n                                        del args[len(m.arglist):]\n                                else:\n                                    # If we called a single arg macro with empty, fake extend args\n                                    while len(args) &lt; len(m.arglist):\n                                        args.append([])\n                                        \n                                # Get macro replacement text\n                                rep = self.macro_expand_args(m,args)\n                                ex = self.expand_macros(rep, expanding_from + [t.value])\n                                for e in ex:\n                                    e.source = t.source\n                                    e.lineno = t.lineno\n                                    if not hasattr(e, &#39;expanded_from&#39;):\n                                        e.expanded_from = []\n                                    e.expanded_from.append(t.value)\n                                # A non-conforming extension implemented by the GCC and clang preprocessors\n                                
# is that an expansion of a macro with arguments where the following token is\n                                # an identifier inserts a space between the expansion and the identifier. This\n                                # differs from Boost.Wave incidentally (see https://github.com/ned14/pcpp/issues/29)\n                                if len(tokens) &gt; j+tokcount and tokens[j+tokcount].type in self.t_ID:\n                                    #print(&#34;*** token after expansion is&#34;, tokens[j+tokcount])\n                                    newtok = copy.copy(tokens[j+tokcount])\n                                    newtok.type = self.t_SPACE\n                                    newtok.value = &#39; &#39;\n                                    ex.append(newtok)\n                                #print(&#34;\\nExpanding macro&#34;, m, &#34;\\n\\ninto&#34;, ex, &#34;\\n\\nreplacing&#34;, tokens[i:j+tokcount])\n                                tokens[i:j+tokcount] = ex\n                    self.linemacrodepth = self.linemacrodepth - 1\n                    if self.linemacrodepth == 0:\n                        self.linemacro = 0\n                    continue\n                elif self.expand_linemacro and t.value == &#39;__LINE__&#39;:\n                    t.type = self.t_INTEGER\n                    t.value = self.t_INTEGER_TYPE(self.linemacro)\n                elif self.expand_countermacro and t.value == &#39;__COUNTER__&#39;:\n                    t.type = self.t_INTEGER\n                    t.value = self.t_INTEGER_TYPE(self.countermacro)\n                    self.countermacro += 1\n                \n            i += 1\n            self.linemacrodepth = self.linemacrodepth - 1\n            if self.linemacrodepth == 0:\n                self.linemacro = 0\n        return tokens\n\n    # ----------------------------------------------------------------------    \n    # evalexpr()\n    # \n    # Evaluate an expression token sequence for the purposes of evaluating\n    
# integral expressions.\n    # ----------------------------------------------------------------------\n\n    def evalexpr(self,tokens):\n        &#34;&#34;&#34;Evaluate an expression token sequence for the purposes of evaluating\n        integral expressions.&#34;&#34;&#34;\n        if not tokens:\n            self.on_error(&#39;unknown&#39;, 0, &#34;Empty expression&#34;)\n            return (0, None)\n        # tokens = tokenize(line)\n        # Search for defined macros\n        partial_expansion = False\n        def replace_defined(tokens):\n            i = 0\n            while i &lt; len(tokens):\n                if tokens[i].type == self.t_ID and tokens[i].value == &#39;defined&#39;:\n                    j = i + 1\n                    needparen = False\n                    result = &#34;0L&#34;\n                    while j &lt; len(tokens):\n                        if tokens[j].type in self.t_WS:\n                            j += 1\n                            continue\n                        elif tokens[j].type == self.t_ID:\n                            if tokens[j].value in self.macros:\n                                result = &#34;1L&#34;\n                            else:\n                                repl = self.on_unknown_macro_in_defined_expr(tokens[j])\n                                if repl is None:\n                                    partial_expansion = True\n                                    result = &#39;defined(&#39;+tokens[j].value+&#39;)&#39;\n                                else:\n                                    result = &#34;1L&#34; if repl else &#34;0L&#34;\n                            if not needparen: break\n                        elif tokens[j].value == &#39;(&#39;:\n                            needparen = True\n                        elif tokens[j].value == &#39;)&#39;:\n                            break\n                        else:\n                            
self.on_error(tokens[i].source,tokens[i].lineno,&#34;Malformed defined()&#34;)\n                        j += 1\n                    if result.startswith(&#39;defined&#39;):\n                        tokens[i].type = self.t_ID\n                        tokens[i].value = result\n                    else:\n                        tokens[i].type = self.t_INTEGER\n                        tokens[i].value = self.t_INTEGER_TYPE(result)\n                    del tokens[i+1:j+1]\n                i += 1\n            return tokens\n        # Replace any defined(macro) before macro expansion\n        tokens = replace_defined(tokens)\n        tokens = self.expand_macros(tokens)\n        # Replace any defined(macro) after macro expansion\n        tokens = replace_defined(tokens)\n        if not tokens:\n            return (0, None)\n        class IndirectToMacroHook(object):\n            def __init__(self, p):\n                self.__preprocessor = p\n                self.partial_expansion = False\n            def __contains__(self, key):\n                return True\n            def __getitem__(self, key):\n                if key.startswith(&#39;defined(&#39;):\n                    self.partial_expansion = True\n                    return 0\n                repl = self.__preprocessor.on_unknown_macro_in_expr(key)\n                #print(&#34;*** IndirectToMacroHook[&#34;, key, &#34;] returns&#34;, repl, file = sys.stderr)\n                if repl is None:\n                    self.partial_expansion = True\n                    return key\n                return repl\n        evalvars = IndirectToMacroHook(self)\n        class IndirectToMacroFunctionHook(object):\n            def __init__(self, p):\n                self.__preprocessor = p\n                self.partial_expansion = False\n            def __contains__(self, key):\n                return True\n            def __getitem__(self, key):\n                repl = self.__preprocessor.on_unknown_macro_function_in_expr(key)\n      
          #print(&#34;*** IndirectToMacroFunctionHook[&#34;, key, &#34;] returns&#34;, repl, file = sys.stderr)\n                if repl is None:\n                    self.partial_expansion = True\n                    return key\n                return repl\n        evalfuncts = IndirectToMacroFunctionHook(self)\n        try:\n            result = self.evaluator(tokens, functions = evalfuncts, identifiers = evalvars).value()\n            partial_expansion = partial_expansion or evalvars.partial_expansion or evalfuncts.partial_expansion\n        except OutputDirective:\n            raise\n        except Exception as e:\n            partial_expansion = partial_expansion or evalvars.partial_expansion or evalfuncts.partial_expansion\n            if not partial_expansion:\n                self.on_error(tokens[0].source,tokens[0].lineno,&#34;Could not evaluate expression due to %s (passed to evaluator: &#39;%s&#39;)&#34; % (repr(e), &#39;&#39;.join([tok.value for tok in tokens])))\n            result = 0\n        return (result, tokens) if partial_expansion else (result, None)\n\n    # ----------------------------------------------------------------------\n    # parsegen()\n    #\n    # Parse an input string\n    # ----------------------------------------------------------------------\n    def parsegen(self,input,source=None,abssource=None):\n        &#34;&#34;&#34;Parse an input string&#34;&#34;&#34;\n        rewritten_source = source\n        if abssource:\n            rewritten_source = abssource\n            for rewrite in self.rewrite_paths:\n                temp = re.sub(rewrite[0], rewrite[1], rewritten_source)\n                if temp != abssource:\n                    rewritten_source = temp\n                    if os.sep != &#39;/&#39;:\n                        rewritten_source = rewritten_source.replace(os.sep, &#39;/&#39;)\n                    break\n\n        # Replace trigraph sequences\n        t = trigraph(input)\n        lines = self.group_lines(t, 
rewritten_source)\n\n        if not source:\n            source = &#34;&#34;\n        if not rewritten_source:\n            rewritten_source = &#34;&#34;\n            \n        my_include_times_idx = len(self.include_times)\n        self.include_times.append(FileInclusionTime(self.macros[&#39;__FILE__&#39;] if &#39;__FILE__&#39; in self.macros else None, source, abssource, self.include_depth))\n        self.include_depth += 1\n        my_include_time_begin = clock()\n        if self.expand_filemacro:\n            self.define(&#34;__FILE__ \\&#34;%s\\&#34;&#34; % rewritten_source)\n\n        self.source = abssource\n        chunk = []\n        enable = True\n        iftrigger = False\n        ifpassthru = False\n        class ifstackentry(object):\n            def __init__(self,enable,iftrigger,ifpassthru,startlinetoks):\n                self.enable = enable\n                self.iftrigger = iftrigger\n                self.ifpassthru = ifpassthru\n                self.rewritten = False\n                self.startlinetoks = startlinetoks\n        ifstack = []\n        # True until any non-whitespace output or anything with effects happens.\n        at_front_of_file = True\n        # True if auto pragma once still a possibility for this #include\n        auto_pragma_once_possible = self.auto_pragma_once_enabled\n        # =(MACRO, 0) means #ifndef MACRO or #if !defined(MACRO) seen, =(MACRO,1) means #define MACRO seen\n        include_guard = None\n        self.on_potential_include_guard(None)\n\n        for x in lines:\n            all_whitespace = True\n            skip_auto_pragma_once_possible_check = False\n            # Handle comments\n            for i,tok in enumerate(x):\n                if tok.type in self.t_COMMENT:\n                    if not self.on_comment(tok):\n                        if tok.type == self.t_COMMENT1:\n                            tok.value = &#39; &#39;\n                        elif tok.type == self.t_COMMENT2:\n                          
  tok.value = &#39;\\n&#39;\n                        tok.type = &#39;CPP_WS&#39;\n            # Skip over whitespace\n            for i,tok in enumerate(x):\n                if tok.type not in self.t_WS and tok.type not in self.t_COMMENT:\n                    all_whitespace = False\n                    break\n            output_and_expand_line = True\n            output_unexpanded_line = False\n            if tok.value == &#39;#&#39;:\n                precedingtoks = [ tok ]\n                output_and_expand_line = False\n                try:\n                    # Preprocessor directive      \n                    i += 1\n                    while i &lt; len(x) and x[i].type in self.t_WS:\n                        precedingtoks.append(x[i])\n                        i += 1                    \n                    dirtokens = self.tokenstrip(x[i:])\n                    if dirtokens:\n                        name = dirtokens[0].value\n                        args = self.tokenstrip(dirtokens[1:])\n                    \n                        if self.debugout is not None:\n                            print(&#34;%d:%d:%d %s:%d #%s %s&#34; % (enable, iftrigger, ifpassthru, dirtokens[0].source, dirtokens[0].lineno, dirtokens[0].value, &#34;&#34;.join([tok.value for tok in args])), file = self.debugout)\n                            #print(ifstack)\n\n                        handling = self.on_directive_handle(dirtokens[0],args,ifpassthru,precedingtoks)\n                        assert handling == True or handling == None\n                    else:\n                        name = &#34;&#34;\n                        args = []\n                        raise OutputDirective(Action.IgnoreAndRemove)\n                        \n                    if name == &#39;define&#39;:\n                        at_front_of_file = False\n                        if enable:\n                            for tok in self.expand_macros(chunk):\n                                yield tok\n             
               chunk = []\n                            if include_guard and include_guard[1] == 0:\n                                if include_guard[0] == args[0].value and len(args) == 1:\n                                    include_guard = (args[0].value, 1)\n                                    # If ifpassthru is only turned on due to this include guard, turn it off\n                                    if ifpassthru and not ifstack[-1].ifpassthru:\n                                        ifpassthru = False\n                            self.define(args)\n                            if self.debugout is not None:\n                                print(&#34;%d:%d:%d %s:%d      %s&#34; % (enable, iftrigger, ifpassthru, dirtokens[0].source, dirtokens[0].lineno, repr(self.macros[args[0].value])), file = self.debugout)\n                            if handling is None:\n                                for tok in x:\n                                    yield tok\n                    elif name == &#39;include&#39;:\n                        if enable:\n                            for tok in self.expand_macros(chunk):\n                                yield tok\n                            chunk = []\n                            oldfile = self.macros[&#39;__FILE__&#39;] if &#39;__FILE__&#39; in self.macros else None\n                            if args and args[0].value != &#39;&lt;&#39; and args[0].type != self.t_STRING:\n                                args = self.tokenstrip(self.expand_macros(args))\n                            #print(&#39;***&#39;, &#39;&#39;.join([x.value for x in args]), file = sys.stderr)\n                            if self.passthru_includes is not None and self.passthru_includes.match(&#39;&#39;.join([x.value for x in args])):\n                                for tok in precedingtoks:\n                                    yield tok\n                                for tok in dirtokens:\n                                    yield tok\n                    
            for tok in self.include(args):\n                                    pass\n                            else:\n                                for tok in self.include(args):\n                                    yield tok\n                            if oldfile is not None:\n                                self.macros[&#39;__FILE__&#39;] = oldfile\n                            self.source = abssource\n                    elif name == &#39;undef&#39;:\n                        at_front_of_file = False\n                        if enable:\n                            for tok in self.expand_macros(chunk):\n                                yield tok\n                            chunk = []\n                            self.undef(args)\n                            if handling is None:\n                                for tok in x:\n                                    yield tok\n                    elif name == &#39;ifdef&#39;:\n                        at_front_of_file = False\n                        ifstack.append(ifstackentry(enable,iftrigger,ifpassthru,x))\n                        if enable:\n                            ifpassthru = False\n                            if not args[0].value in self.macros:\n                                res = self.on_unknown_macro_in_defined_expr(args[0])\n                                if res is None:\n                                    ifpassthru = True\n                                    ifstack[-1].rewritten = True\n                                    raise OutputDirective(Action.IgnoreAndPassThrough)\n                                elif res is True:\n                                    iftrigger = True\n                                else:\n                                    enable = False\n                                    iftrigger = False\n                            else:\n                                iftrigger = True\n                    elif name == &#39;ifndef&#39;:\n                        if not ifstack 
and at_front_of_file:\n                            self.on_potential_include_guard(args[0].value)\n                            include_guard = (args[0].value, 0)\n                        at_front_of_file = False\n                        ifstack.append(ifstackentry(enable,iftrigger,ifpassthru,x))\n                        if enable:\n                            ifpassthru = False\n                            if args[0].value in self.macros:\n                                enable = False\n                                iftrigger = False\n                            else:\n                                res = self.on_unknown_macro_in_defined_expr(args[0])\n                                if res is None:\n                                    ifpassthru = True\n                                    ifstack[-1].rewritten = True\n                                    raise OutputDirective(Action.IgnoreAndPassThrough)\n                                elif res is True:\n                                    enable = False\n                                    iftrigger = False\n                                else:\n                                    iftrigger = True\n                    elif name == &#39;if&#39;:\n                        if not ifstack and at_front_of_file:\n                            if args[0].value == &#39;!&#39; and args[1].value == &#39;defined&#39;:\n                                n = 2\n                                if args[n].value == &#39;(&#39;: n += 1\n                                self.on_potential_include_guard(args[n].value)\n                                include_guard = (args[n].value, 0)\n                        at_front_of_file = False\n                        ifstack.append(ifstackentry(enable,iftrigger,ifpassthru,x))\n                        if enable:\n                            iftrigger = False\n                            ifpassthru = False\n                            result, rewritten = self.evalexpr(args)\n                     
       if rewritten is not None:\n                                x = x[:i+2] + rewritten + [x[-1]]\n                                x[i+1] = copy.copy(x[i+1])\n                                x[i+1].type = self.t_SPACE\n                                x[i+1].value = &#39; &#39;\n                                ifpassthru = True\n                                ifstack[-1].rewritten = True\n                                raise OutputDirective(Action.IgnoreAndPassThrough)\n                            if not result:\n                                enable = False\n                            else:\n                                iftrigger = True\n                    elif name == &#39;elif&#39;:\n                        at_front_of_file = False\n                        if ifstack:\n                            if ifstack[-1].enable:     # We only pay attention if outer &#34;if&#34; allows this\n                                if enable and not ifpassthru:         # If already true, we flip enable False\n                                    enable = False\n                                elif not iftrigger:   # If False, but not triggered yet, we&#39;ll check expression\n                                    result, rewritten = self.evalexpr(args)\n                                    if rewritten is not None:\n                                        enable = True\n                                        if not ifpassthru:\n                                            # This is a passthru #elif after a False #if, so convert to an #if\n                                            x[i].value = &#39;if&#39;\n                                        x = x[:i+2] + rewritten + [x[-1]]\n                                        x[i+1] = copy.copy(x[i+1])\n                                        x[i+1].type = self.t_SPACE\n                                        x[i+1].value = &#39; &#39;\n                                        ifpassthru = True\n                                      
  ifstack[-1].rewritten = True\n                                        raise OutputDirective(Action.IgnoreAndPassThrough)\n                                    if ifpassthru:\n                                        # If this elif can only ever be true, simulate that\n                                        if result:\n                                            newtok = copy.copy(x[i+3])\n                                            newtok.type = self.t_INTEGER\n                                            newtok.value = self.t_INTEGER_TYPE(result)\n                                            x = x[:i+2] + [newtok] + [x[-1]]\n                                            raise OutputDirective(Action.IgnoreAndPassThrough)\n                                        # Otherwise elide\n                                        enable = False\n                                    elif result:\n                                        enable  = True\n                                        iftrigger = True\n                        else:\n                            self.on_error(dirtokens[0].source,dirtokens[0].lineno,&#34;Misplaced #elif&#34;)\n                            \n                    elif name == &#39;else&#39;:\n                        at_front_of_file = False\n                        if ifstack:\n                            if ifstack[-1].enable:\n                                if ifpassthru:\n                                    enable = True\n                                    raise OutputDirective(Action.IgnoreAndPassThrough)\n                                if enable:\n                                    enable = False\n                                elif not iftrigger:\n                                    enable = True\n                                    iftrigger = True\n                        else:\n                            self.on_error(dirtokens[0].source,dirtokens[0].lineno,&#34;Misplaced #else&#34;)\n\n                    elif name == 
&#39;endif&#39;:\n                        at_front_of_file = False\n                        if ifstack:\n                            oldifstackentry = ifstack.pop()\n                            enable = oldifstackentry.enable\n                            iftrigger = oldifstackentry.iftrigger\n                            ifpassthru = oldifstackentry.ifpassthru\n                            if self.debugout is not None:\n                                print(&#34;%d:%d:%d %s:%d      (%s:%d %s)&#34; % (enable, iftrigger, ifpassthru, dirtokens[0].source, dirtokens[0].lineno,\n                                    oldifstackentry.startlinetoks[0].source, oldifstackentry.startlinetoks[0].lineno, &#34;&#34;.join([n.value for n in oldifstackentry.startlinetoks])), file = self.debugout)\n                            skip_auto_pragma_once_possible_check = True\n                            if oldifstackentry.rewritten:\n                                raise OutputDirective(Action.IgnoreAndPassThrough)\n                        else:\n                            self.on_error(dirtokens[0].source,dirtokens[0].lineno,&#34;Misplaced #endif&#34;)\n                    elif name == &#39;pragma&#39; and args[0].value == &#39;once&#39;:\n                        if enable:\n                            self.include_once[self.source] = None\n                    elif enable:\n                        # Unknown preprocessor directive\n                        output_unexpanded_line = (self.on_directive_unknown(dirtokens[0], args, ifpassthru, precedingtoks) is None)\n\n                except OutputDirective as e:\n                    if e.action == Action.IgnoreAndPassThrough:\n                        output_unexpanded_line = True\n                    elif e.action == Action.IgnoreAndRemove:\n                        pass\n                    else:\n                        assert False\n\n            # If there is ever any non-whitespace output outside an include guard, auto pragma once is not 
possible\n            if not skip_auto_pragma_once_possible_check and auto_pragma_once_possible and not ifstack and not all_whitespace:\n                auto_pragma_once_possible = False\n                if self.debugout is not None:\n                    print(&#34;%d:%d:%d %s:%d Determined that #include \\&#34;%s\\&#34; is not entirely wrapped in an include guard macro, disabling auto-applying #pragma once&#34; % (enable, iftrigger, ifpassthru, x[0].source, x[0].lineno, self.source), file = self.debugout)\n                \n            if output_and_expand_line or output_unexpanded_line:\n                if not all_whitespace:\n                    at_front_of_file = False\n\n                # Normal text\n                if enable:\n                    if output_and_expand_line:\n                        chunk.extend(x)\n                    elif output_unexpanded_line:\n                        for tok in self.expand_macros(chunk):\n                            yield tok\n                        chunk = []\n                        for tok in x:\n                            yield tok\n                else:\n                    # Need to extend with the same number of blank lines\n                    i = 0\n                    while i &lt; len(x):\n                        if x[i].type not in self.t_WS:\n                            del x[i]\n                        else:\n                            i += 1\n                    chunk.extend(x)\n\n        for tok in self.expand_macros(chunk):\n            yield tok\n        chunk = []\n        for i in ifstack:\n            self.on_error(i.startlinetoks[0].source, i.startlinetoks[0].lineno, &#34;Unterminated &#34; + &#34;&#34;.join([n.value for n in i.startlinetoks]))\n        if auto_pragma_once_possible and include_guard and include_guard[1] == 1:\n            if self.debugout is not None:\n                print(&#34;%d:%d:%d %s:%d Determined that #include \\&#34;%s\\&#34; is entirely wrapped in an include guard macro 
called %s, auto-applying #pragma once&#34; % (enable, iftrigger, ifpassthru, self.source, 0, self.source, include_guard[0]), file = self.debugout)\n            self.include_once[self.source] = include_guard[0]\n        elif self.auto_pragma_once_enabled and self.source not in self.include_once:\n            if self.debugout is not None:\n                print(&#34;%d:%d:%d %s:%d Did not auto apply #pragma once to this file due to auto_pragma_once_possible=%d, include_guard=%s&#34; % (enable, iftrigger, ifpassthru, self.source, 0, auto_pragma_once_possible, repr(include_guard)), file = self.debugout)\n        my_include_time_end = clock()\n        self.include_times[my_include_times_idx].elapsed = my_include_time_end - my_include_time_begin\n        self.include_depth -= 1\n\n    # ----------------------------------------------------------------------\n    # include()\n    #\n    # Implementation of file-inclusion\n    # ----------------------------------------------------------------------\n\n    def include(self,tokens):\n        &#34;&#34;&#34;Implementation of file-inclusion&#34;&#34;&#34;\n        # Try to extract the filename and then process an include file\n        if not tokens:\n            return\n        if tokens:\n            if tokens[0].value != &#39;&lt;&#39; and tokens[0].type != self.t_STRING:\n                tokens = self.tokenstrip(self.expand_macros(tokens))\n\n            is_system_include = False\n            if tokens[0].value == &#39;&lt;&#39;:\n                is_system_include = True\n                # Include &lt;...&gt;\n                i = 1\n                while i &lt; len(tokens):\n                    if tokens[i].value == &#39;&gt;&#39;:\n                        break\n                    i += 1\n                else:\n                    self.on_error(tokens[0].source,tokens[0].lineno,&#34;Malformed #include &lt;...&gt;&#34;)\n                    return\n                filename = &#34;&#34;.join([x.value for x in tokens[1:i]])\n 
               # Search only formally specified paths\n                path = self.path\n            elif tokens[0].type == self.t_STRING:\n                filename = tokens[0].value[1:-1]\n                # Search from each nested include file, as well as formally specified paths\n                path = self.temp_path + self.path\n            else:\n                p = self.on_include_not_found(True,False,self.temp_path[0] if self.temp_path else &#39;&#39;,tokens[0].value)\n                assert p is None\n                return\n        if not path:\n            path = [&#39;&#39;]\n        while True:\n            #print path\n            for p in path:\n                iname = os.path.join(p,filename)\n                fulliname = os.path.abspath(iname)\n                if fulliname in self.include_once:\n                    if self.debugout is not None:\n                        print(&#34;x:x:x x:x #include \\&#34;%s\\&#34; skipped as already seen&#34; % (fulliname), file = self.debugout)\n                    return\n                try:\n                    ih = self.on_file_open(is_system_include,fulliname)\n                    data = ih.read()\n                    ih.close()\n                    dname = os.path.dirname(fulliname)\n                    if dname:\n                        self.temp_path.insert(0,dname)\n                    for tok in self.parsegen(data,filename,fulliname):\n                        yield tok\n                    if dname:\n                        del self.temp_path[0]\n                    return\n                except IOError:\n                    pass\n            else:\n                p = self.on_include_not_found(False,is_system_include,self.temp_path[0] if self.temp_path else &#39;&#39;,filename)\n                assert p is not None\n                path.append(p)\n\n    # ----------------------------------------------------------------------\n    # define()\n    #\n    # Define a new macro\n    # 
----------------------------------------------------------------------\n\n    def define(self,tokens):\n        &#34;&#34;&#34;Define a new macro&#34;&#34;&#34;\n        if isinstance(tokens,STRING_TYPES):\n            tokens = self.tokenize(tokens)\n        else:\n            tokens = [copy.copy(tok) for tok in tokens]\n        def add_macro(self, name, macro):\n            macro.source = name.source\n            macro.lineno = name.lineno\n            self.macros[name.value] = macro\n\n        linetok = tokens\n        try:\n            name = linetok[0]\n            if len(linetok) &gt; 1:\n                mtype = linetok[1]\n            else:\n                mtype = None\n            if not mtype:\n                m = Macro(name.value,[])\n                add_macro(self, name, m)\n            elif mtype.type in self.t_WS:\n                # A normal macro\n                m = Macro(name.value,self.tokenstrip(linetok[2:]))\n                add_macro(self, name, m)\n            elif mtype.value == &#39;(&#39;:\n                # A macro with arguments\n                tokcount, args, positions = self.collect_args(linetok[1:])\n                variadic = False\n                for a in args:\n                    if variadic:\n                        self.on_error(name.source,name.lineno,&#34;No more arguments may follow a variadic argument&#34;)\n                        break\n                    astr = &#34;&#34;.join([str(_i.value) for _i in a])\n                    if astr == &#34;...&#34;:\n                        variadic = True\n                        a[0].type = self.t_ID\n                        a[0].value = &#39;__VA_ARGS__&#39;\n                        variadic = True\n                        del a[1:]\n                        continue\n                    elif astr[-3:] == &#34;...&#34; and a[0].type == self.t_ID:\n                        variadic = True\n                        del a[1:]\n                        # If, for some reason, &#34;.&#34; is 
part of the identifier, strip off the name for the purposes\n                        # of macro expansion\n                        if a[0].value[-3:] == &#39;...&#39;:\n                            a[0].value = a[0].value[:-3]\n                        continue\n                    # Empty arguments are permitted\n                    if len(a) == 0 and len(args) == 1:\n                        continue\n                    if len(a) &gt; 1 or a[0].type != self.t_ID:\n                        self.on_error(a[0].source,a[0].lineno,&#34;Invalid macro argument&#34;)\n                        break\n                else:\n                    mvalue = self.tokenstrip(linetok[1+tokcount:])\n                    i = 0\n                    while i &lt; len(mvalue):\n                        if i+1 &lt; len(mvalue):\n                            if mvalue[i].type in self.t_WS and mvalue[i+1].value == &#39;##&#39;:\n                                del mvalue[i]\n                                continue\n                            elif mvalue[i].value == &#39;##&#39; and mvalue[i+1].type in self.t_WS:\n                                del mvalue[i+1]\n                        i += 1\n                    m = Macro(name.value,mvalue,[x[0].value for x in args] if args != [[]] else [],variadic)\n                    self.macro_prescan(m)\n                    add_macro(self, name, m)\n            else:\n                self.on_error(name.source,name.lineno,&#34;Bad macro definition&#34;)\n        #except LookupError:\n        #    print(&#34;Bad macro definition&#34;)\n        except:\n            raise\n\n    # ----------------------------------------------------------------------\n    # undef()\n    #\n    # Undefine a macro\n    # ----------------------------------------------------------------------\n\n    def undef(self,tokens):\n        &#34;&#34;&#34;Undefine a macro&#34;&#34;&#34;\n        if isinstance(tokens,STRING_TYPES):\n            tokens = self.tokenize(tokens)\n        id = 
tokens[0].value\n        try:\n            del self.macros[id]\n        except LookupError:\n            pass\n\n    # ----------------------------------------------------------------------\n    # parse()\n    #\n    # Parse input text.\n    # ----------------------------------------------------------------------\n    def parse(self,input,source=None,ignore={}):\n        &#34;&#34;&#34;Parse input text.&#34;&#34;&#34;\n        if isinstance(input, FILE_TYPES):\n            if source is None:\n                source = input.name\n            input = input.read()\n        self.ignore = ignore\n        self.parser = self.parsegen(input,source,os.path.abspath(source) if source else None)\n        if source is not None:\n            dname = os.path.dirname(source)\n            self.temp_path.insert(0,dname)\n        \n    # ----------------------------------------------------------------------\n    # token()\n    #\n    # Method to return individual tokens\n    # ----------------------------------------------------------------------\n    def token(self):\n        &#34;&#34;&#34;Method to return individual tokens&#34;&#34;&#34;\n        try:\n            while True:\n                tok = next(self.parser)\n                if tok.type not in self.ignore:\n                    return tok\n        except StopIteration:\n            self.parser = None\n            return None\n            \n    def write(self, oh=sys.stdout):\n        &#34;&#34;&#34;Calls token() repeatedly, expanding tokens to their text and writing to the file like stream oh&#34;&#34;&#34;\n        lastlineno = 0\n        lastsource = None\n        done = False\n        blanklines = 0\n        while not done:\n            emitlinedirective = False\n            toks = []\n            all_ws = True\n            # Accumulate a line\n            while not done:\n                tok = self.token()\n                if not tok:\n                    done = True\n                    break\n                
toks.append(tok)\n                if tok.value and tok.value[0] == &#39;\\n&#39;:\n                    break\n                if tok.type not in self.t_WS:\n                    all_ws = False\n            if not toks:\n                break\n            if all_ws:\n                # Remove preceding whitespace so it becomes just a LF\n                if len(toks) &gt; 1:\n                    tok = toks[-1]\n                    toks = [ tok ]\n                blanklines += toks[0].value.count(&#39;\\n&#39;)\n                continue\n            # Filter out line continuations, collapsing before and after if needs be\n            for n in xrange(len(toks)-1, -1, -1):\n                if toks[n].type in self.t_LINECONT:\n                    if n &gt; 0 and n &lt; len(toks) - 1 and toks[n-1].type in self.t_WS and toks[n+1].type in self.t_WS:\n                        toks[n-1].value = toks[n-1].value[0]\n                        del toks[n:n+2]\n                    else:\n                        del toks[n]\n            # The line in toks is not all whitespace\n            emitlinedirective = (blanklines &gt; 6) and self.line_directive is not None\n            if hasattr(toks[0], &#39;source&#39;):\n                if lastsource is None:\n                    if toks[0].source is not None:\n                        emitlinedirective = True\n                    lastsource = toks[0].source\n                elif lastsource != toks[0].source:\n                    emitlinedirective = True\n                    lastsource = toks[0].source\n            # Replace consecutive whitespace in output with a single space except at any indent\n            first_ws = None\n            #print(toks)\n            for n in xrange(len(toks)-1, -1, -1):\n                tok = toks[n]\n                if first_ws is None:\n                    if tok.type in self.t_SPACE or len(tok.value) == 0:\n                        first_ws = n\n                else:\n                    if tok.type not in 
self.t_SPACE and len(tok.value) &gt; 0:\n                        m = n + 1\n                        while m != first_ws:\n                            del toks[m]\n                            first_ws -= 1\n                        first_ws = None\n                        if self.compress &gt; 0:\n                            # Collapse a token of many whitespace into single\n                            if toks[m].value and toks[m].value[0] == &#39; &#39;:\n                                toks[m].value = &#39; &#39;\n            if not self.compress &gt; 1 and not emitlinedirective:\n                newlinesneeded = toks[0].lineno - lastlineno - 1\n                if newlinesneeded &gt; 6 and self.line_directive is not None:\n                    emitlinedirective = True\n                else:\n                    while newlinesneeded &gt; 0:\n                        oh.write(&#39;\\n&#39;)\n                        newlinesneeded -= 1\n            lastlineno = toks[0].lineno\n            # Account for those newlines in a multiline comment\n            if emitlinedirective and self.line_directive is not None:\n                oh.write(self.line_directive + &#39; &#39; + str(lastlineno) + (&#39;&#39; if lastsource is None else (&#39; &#34;&#39; + lastsource + &#39;&#34;&#39; )) + &#39;\\n&#39;)\n            for tok in toks:\n                if tok.type == self.t_COMMENT1:\n                    lastlineno += tok.value.count(&#39;\\n&#39;)\n            blanklines = 0\n            #print toks[0].lineno, \n            for tok in toks:\n                #print tok.value,\n                oh.write(tok.value)</code></pre>\n                        </details>\n                        <h3>Methods</h3>\n                        <dl>\n                            <dt id=\"pcpp.preprocessor.Preprocessor.add_path\"><code class=\"name flex\">\n<span>def <span class=\"ident\">add_path</span></span>(<span>self, path)</span>\n</code></dt>\n                            <dd>\n                    
            <section class=\"desc\">\n                                    <p>Adds a search path to the preprocessor.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def add_path(self,path):\n    &#34;&#34;&#34;Adds a search path to the preprocessor. &#34;&#34;&#34;\n    self.path.append(path)\n    # If the search path being added is relative, or has a common ancestor to the\n    # current working directory, add a rewrite to relativise includes from this\n    # search path\n    relpath = None\n    try:\n        relpath = os.path.relpath(path)\n    except: pass\n    if relpath is not None:\n        self.rewrite_paths += [(re.escape(os.path.abspath(path) + os.sep) + &#39;(.*)&#39;, os.path.join(relpath, &#39;\\\\1&#39;))]</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.Preprocessor.collect_args\"><code class=\"name flex\">\n<span>def <span class=\"ident\">collect_args</span></span>(<span>self, tokenlist, ignore_errors=False)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Collects comma separated arguments from a list of tokens.\n                                        The arguments\n                                        must be enclosed in parenthesis.\n                                        Returns a tuple (tokencount,args,positions)\n                                        where tokencount is the number of tokens consumed, args is a list of arguments,\n                                        and positions is a list of integers containing the starting index of each\n                                        argument.\n                                        Each argument is 
represented by a list of tokens.</p>\n                                    <p>When collecting arguments, leading and trailing whitespace is removed\n                                        from each argument.\n                                    </p>\n                                    <p>This function properly handles nested parenthesis and commas&mdash;these do not\n                                        define new arguments.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def collect_args(self,tokenlist,ignore_errors=False):\n    &#34;&#34;&#34;Collects comma separated arguments from a list of tokens.   The arguments\n    must be enclosed in parenthesis.  Returns a tuple (tokencount,args,positions)\n    where tokencount is the number of tokens consumed, args is a list of arguments,\n    and positions is a list of integers containing the starting index of each\n    argument.  Each argument is represented by a list of tokens.\n    \n    When collecting arguments, leading and trailing whitespace is removed\n    from each argument.  
\n    \n    This function properly handles nested parenthesis and commas---these do not\n    define new arguments.&#34;&#34;&#34;\n    args = []\n    positions = []\n    current_arg = []\n    nesting = 1\n    tokenlen = len(tokenlist)\n\n    # Search for the opening &#39;(&#39;.\n    i = 0\n    while (i &lt; tokenlen) and (tokenlist[i].type in self.t_WS):\n        i += 1\n\n    if (i &lt; tokenlen) and (tokenlist[i].value == &#39;(&#39;):\n        positions.append(i+1)\n    else:\n        if not ignore_errors:\n            self.on_error(tokenlist[0].source,tokenlist[0].lineno,&#34;Missing &#39;(&#39; in macro arguments&#34;)\n        return 0, [], []\n\n    i += 1\n\n    while i &lt; tokenlen:\n        t = tokenlist[i]\n        if t.value == &#39;(&#39;:\n            current_arg.append(t)\n            nesting += 1\n        elif t.value == &#39;)&#39;:\n            nesting -= 1\n            if nesting == 0:\n                args.append(self.tokenstrip(current_arg))\n                positions.append(i)\n                return i+1,args,positions\n            current_arg.append(t)\n        elif t.value == &#39;,&#39; and nesting == 1:\n            args.append(self.tokenstrip(current_arg))\n            positions.append(i+1)\n            current_arg = []\n        else:\n            current_arg.append(t)\n        i += 1\n\n    # Missing end argument\n    if not ignore_errors:\n        self.on_error(tokenlist[-1].source,tokenlist[-1].lineno,&#34;Missing &#39;)&#39; in macro arguments&#34;)\n    return 0, [],[]</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.Preprocessor.define\"><code class=\"name flex\">\n<span>def <span class=\"ident\">define</span></span>(<span>self, tokens)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Define a new macro</p>\n                        
        </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def define(self,tokens):\n    &#34;&#34;&#34;Define a new macro&#34;&#34;&#34;\n    if isinstance(tokens,STRING_TYPES):\n        tokens = self.tokenize(tokens)\n    else:\n        tokens = [copy.copy(tok) for tok in tokens]\n    def add_macro(self, name, macro):\n        macro.source = name.source\n        macro.lineno = name.lineno\n        self.macros[name.value] = macro\n\n    linetok = tokens\n    try:\n        name = linetok[0]\n        if len(linetok) &gt; 1:\n            mtype = linetok[1]\n        else:\n            mtype = None\n        if not mtype:\n            m = Macro(name.value,[])\n            add_macro(self, name, m)\n        elif mtype.type in self.t_WS:\n            # A normal macro\n            m = Macro(name.value,self.tokenstrip(linetok[2:]))\n            add_macro(self, name, m)\n        elif mtype.value == &#39;(&#39;:\n            # A macro with arguments\n            tokcount, args, positions = self.collect_args(linetok[1:])\n            variadic = False\n            for a in args:\n                if variadic:\n                    self.on_error(name.source,name.lineno,&#34;No more arguments may follow a variadic argument&#34;)\n                    break\n                astr = &#34;&#34;.join([str(_i.value) for _i in a])\n                if astr == &#34;...&#34;:\n                    variadic = True\n                    a[0].type = self.t_ID\n                    a[0].value = &#39;__VA_ARGS__&#39;\n                    variadic = True\n                    del a[1:]\n                    continue\n                elif astr[-3:] == &#34;...&#34; and a[0].type == self.t_ID:\n                    variadic = True\n                    del a[1:]\n                    # If, for some reason, &#34;.&#34; is part of the identifier, strip off the 
name for the purposes\n                    # of macro expansion\n                    if a[0].value[-3:] == &#39;...&#39;:\n                        a[0].value = a[0].value[:-3]\n                    continue\n                # Empty arguments are permitted\n                if len(a) == 0 and len(args) == 1:\n                    continue\n                if len(a) &gt; 1 or a[0].type != self.t_ID:\n                    self.on_error(a[0].source,a[0].lineno,&#34;Invalid macro argument&#34;)\n                    break\n            else:\n                mvalue = self.tokenstrip(linetok[1+tokcount:])\n                i = 0\n                while i &lt; len(mvalue):\n                    if i+1 &lt; len(mvalue):\n                        if mvalue[i].type in self.t_WS and mvalue[i+1].value == &#39;##&#39;:\n                            del mvalue[i]\n                            continue\n                        elif mvalue[i].value == &#39;##&#39; and mvalue[i+1].type in self.t_WS:\n                            del mvalue[i+1]\n                    i += 1\n                m = Macro(name.value,mvalue,[x[0].value for x in args] if args != [[]] else [],variadic)\n                self.macro_prescan(m)\n                add_macro(self, name, m)\n        else:\n            self.on_error(name.source,name.lineno,&#34;Bad macro definition&#34;)\n    #except LookupError:\n    #    print(&#34;Bad macro definition&#34;)\n    except:\n        raise</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.Preprocessor.evalexpr\"><code class=\"name flex\">\n<span>def <span class=\"ident\">evalexpr</span></span>(<span>self, tokens)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Evaluate an expression token sequence for the purposes of evaluating\n                                        integral 
expressions.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def evalexpr(self,tokens):\n    &#34;&#34;&#34;Evaluate an expression token sequence for the purposes of evaluating\n    integral expressions.&#34;&#34;&#34;\n    if not tokens:\n        self.on_error(&#39;unknown&#39;, 0, &#34;Empty expression&#34;)\n        return (0, None)\n    # tokens = tokenize(line)\n    # Search for defined macros\n    partial_expansion = False\n    def replace_defined(tokens):\n        i = 0\n        while i &lt; len(tokens):\n            if tokens[i].type == self.t_ID and tokens[i].value == &#39;defined&#39;:\n                j = i + 1\n                needparen = False\n                result = &#34;0L&#34;\n                while j &lt; len(tokens):\n                    if tokens[j].type in self.t_WS:\n                        j += 1\n                        continue\n                    elif tokens[j].type == self.t_ID:\n                        if tokens[j].value in self.macros:\n                            result = &#34;1L&#34;\n                        else:\n                            repl = self.on_unknown_macro_in_defined_expr(tokens[j])\n                            if repl is None:\n                                partial_expansion = True\n                                result = &#39;defined(&#39;+tokens[j].value+&#39;)&#39;\n                            else:\n                                result = &#34;1L&#34; if repl else &#34;0L&#34;\n                        if not needparen: break\n                    elif tokens[j].value == &#39;(&#39;:\n                        needparen = True\n                    elif tokens[j].value == &#39;)&#39;:\n                        break\n                    else:\n                        
self.on_error(tokens[i].source,tokens[i].lineno,&#34;Malformed defined()&#34;)\n                    j += 1\n                if result.startswith(&#39;defined&#39;):\n                    tokens[i].type = self.t_ID\n                    tokens[i].value = result\n                else:\n                    tokens[i].type = self.t_INTEGER\n                    tokens[i].value = self.t_INTEGER_TYPE(result)\n                del tokens[i+1:j+1]\n            i += 1\n        return tokens\n    # Replace any defined(macro) before macro expansion\n    tokens = replace_defined(tokens)\n    tokens = self.expand_macros(tokens)\n    # Replace any defined(macro) after macro expansion\n    tokens = replace_defined(tokens)\n    if not tokens:\n        return (0, None)\n    class IndirectToMacroHook(object):\n        def __init__(self, p):\n            self.__preprocessor = p\n            self.partial_expansion = False\n        def __contains__(self, key):\n            return True\n        def __getitem__(self, key):\n            if key.startswith(&#39;defined(&#39;):\n                self.partial_expansion = True\n                return 0\n            repl = self.__preprocessor.on_unknown_macro_in_expr(key)\n            #print(&#34;*** IndirectToMacroHook[&#34;, key, &#34;] returns&#34;, repl, file = sys.stderr)\n            if repl is None:\n                self.partial_expansion = True\n                return key\n            return repl\n    evalvars = IndirectToMacroHook(self)\n    class IndirectToMacroFunctionHook(object):\n        def __init__(self, p):\n            self.__preprocessor = p\n            self.partial_expansion = False\n        def __contains__(self, key):\n            return True\n        def __getitem__(self, key):\n            repl = self.__preprocessor.on_unknown_macro_function_in_expr(key)\n            #print(&#34;*** IndirectToMacroFunctionHook[&#34;, key, &#34;] returns&#34;, repl, file = sys.stderr)\n            if repl is None:\n                
self.partial_expansion = True\n                return key\n            return repl\n    evalfuncts = IndirectToMacroFunctionHook(self)\n    try:\n        result = self.evaluator(tokens, functions = evalfuncts, identifiers = evalvars).value()\n        partial_expansion = partial_expansion or evalvars.partial_expansion or evalfuncts.partial_expansion\n    except OutputDirective:\n        raise\n    except Exception as e:\n        partial_expansion = partial_expansion or evalvars.partial_expansion or evalfuncts.partial_expansion\n        if not partial_expansion:\n            self.on_error(tokens[0].source,tokens[0].lineno,&#34;Could not evaluate expression due to %s (passed to evaluator: &#39;%s&#39;)&#34; % (repr(e), &#39;&#39;.join([tok.value for tok in tokens])))\n        result = 0\n    return (result, tokens) if partial_expansion else (result, None)</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.Preprocessor.expand_macros\"><code class=\"name flex\">\n<span>def <span class=\"ident\">expand_macros</span></span>(<span>self, tokens, expanding_from=[])</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Given a list of tokens, this function performs macro expansion.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def expand_macros(self,tokens,expanding_from=[]):\n    &#34;&#34;&#34;Given a list of tokens, this function performs macro expansion.&#34;&#34;&#34;\n    # Each token needs to track from which macros it has been expanded from to prevent recursion\n    for tok in tokens:\n        if not hasattr(tok, &#39;expanded_from&#39;):\n            tok.expanded_from = []\n    i = 
0\n    #print(&#34;*** EXPAND MACROS in&#34;, &#34;&#34;.join([t.value for t in tokens]), &#34;expanding_from=&#34;, expanding_from)\n    #print(tokens)\n    #print([(t.value, t.expanded_from) for t in tokens])\n    while i &lt; len(tokens):\n        t = tokens[i]\n        if self.linemacrodepth == 0:\n            self.linemacro = t.lineno\n        self.linemacrodepth = self.linemacrodepth + 1\n        if t.type == self.t_ID:\n            if t.value in self.macros and t.value not in t.expanded_from and t.value not in expanding_from:\n                # Yes, we found a macro match\n                m = self.macros[t.value]\n                if m.arglist is None:\n                    # A simple macro\n                    rep = [copy.copy(_x) for _x in m.value]\n                    ex = self.expand_macros(rep, expanding_from + [t.value])\n                    #print(&#34;\\nExpanding macro&#34;, m, &#34;\\ninto&#34;, ex, &#34;\\nreplacing&#34;, tokens[i:i+1])\n                    for e in ex:\n                        e.source = t.source\n                        e.lineno = t.lineno\n                        if not hasattr(e, &#39;expanded_from&#39;):\n                            e.expanded_from = []\n                        e.expanded_from.append(t.value)\n                    tokens[i:i+1] = ex\n                else:\n                    # A macro with arguments\n                    j = i + 1\n                    while j &lt; len(tokens) and (tokens[j].type in self.t_WS or tokens[j].type in self.t_COMMENT):\n                        j += 1\n                    # A function like macro without an invocation list is to be ignored\n                    if j == len(tokens) or tokens[j].value != &#39;(&#39;:\n                        i = j\n                    else:\n                        tokcount,args,positions = self.collect_args(tokens[j:], True)\n                        if tokcount == 0:\n                            # Unclosed parameter list, just bail out\n                    
        break\n                        if (not m.variadic\n                            # A no arg or single arg consuming macro is permitted to be expanded with nothing\n                            and (args != [[]] or len(m.arglist) &gt; 1)\n                            and len(args) !=  len(m.arglist)):\n                            self.on_error(t.source,t.lineno,&#34;Macro %s requires %d arguments but was passed %d&#34; % (t.value,len(m.arglist),len(args)))\n                            i = j + tokcount\n                        elif m.variadic and len(args) &lt; len(m.arglist)-1:\n                            if len(m.arglist) &gt; 2:\n                                self.on_error(t.source,t.lineno,&#34;Macro %s must have at least %d arguments&#34; % (t.value, len(m.arglist)-1))\n                            else:\n                                self.on_error(t.source,t.lineno,&#34;Macro %s must have at least %d argument&#34; % (t.value, len(m.arglist)-1))\n                            i = j + tokcount\n                        else:\n                            if m.variadic:\n                                if len(args) == len(m.arglist)-1:\n                                    args.append([])\n                                else:\n                                    args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]\n                                    del args[len(m.arglist):]\n                            else:\n                                # If we called a single arg macro with empty, fake extend args\n                                while len(args) &lt; len(m.arglist):\n                                    args.append([])\n                                    \n                            # Get macro replacement text\n                            rep = self.macro_expand_args(m,args)\n                            ex = self.expand_macros(rep, expanding_from + [t.value])\n                            for e in ex:\n                            
    e.source = t.source\n                                e.lineno = t.lineno\n                                if not hasattr(e, &#39;expanded_from&#39;):\n                                    e.expanded_from = []\n                                e.expanded_from.append(t.value)\n                            # A non-conforming extension implemented by the GCC and clang preprocessors\n                            # is that an expansion of a macro with arguments where the following token is\n                            # an identifier inserts a space between the expansion and the identifier. This\n                            # differs from Boost.Wave incidentally (see https://github.com/ned14/pcpp/issues/29)\n                            if len(tokens) &gt; j+tokcount and tokens[j+tokcount].type in self.t_ID:\n                                #print(&#34;*** token after expansion is&#34;, tokens[j+tokcount])\n                                newtok = copy.copy(tokens[j+tokcount])\n                                newtok.type = self.t_SPACE\n                                newtok.value = &#39; &#39;\n                                ex.append(newtok)\n                            #print(&#34;\\nExpanding macro&#34;, m, &#34;\\n\\ninto&#34;, ex, &#34;\\n\\nreplacing&#34;, tokens[i:j+tokcount])\n                            tokens[i:j+tokcount] = ex\n                self.linemacrodepth = self.linemacrodepth - 1\n                if self.linemacrodepth == 0:\n                    self.linemacro = 0\n                continue\n            elif self.expand_linemacro and t.value == &#39;__LINE__&#39;:\n                t.type = self.t_INTEGER\n                t.value = self.t_INTEGER_TYPE(self.linemacro)\n            elif self.expand_countermacro and t.value == &#39;__COUNTER__&#39;:\n                t.type = self.t_INTEGER\n                t.value = self.t_INTEGER_TYPE(self.countermacro)\n                self.countermacro += 1\n            \n        i += 1\n        self.linemacrodepth = 
self.linemacrodepth - 1\n        if self.linemacrodepth == 0:\n            self.linemacro = 0\n    return tokens</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.Preprocessor.group_lines\"><code class=\"name flex\">\n<span>def <span class=\"ident\">group_lines</span></span>(<span>self, input, abssource)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Given an input string, this function splits it into lines.\n                                        Trailing whitespace\n                                        is removed. This function forms the lowest level of the\n                                        preprocessor&mdash;grouping\n                                        text into a line-by-line format.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def group_lines(self,input,abssource):\n    r&#34;&#34;&#34;Given an input string, this function splits it into lines.  Trailing whitespace\n    is removed. 
This function forms the lowest level of the preprocessor---grouping\n    text into a line-by-line format.\n    &#34;&#34;&#34;\n    lex = self.lexer.clone()\n    lines = [x.rstrip() for x in input.splitlines()]\n\n    input = &#34;\\n&#34;.join(lines)\n    lex.input(input)\n    lex.lineno = 1\n\n    current_line = []\n    while True:\n        tok = lex.token()\n        if not tok:\n            break\n        tok.source = abssource\n        current_line.append(tok)\n        if tok.type in self.t_WS and tok.value == &#39;\\n&#39;:\n            yield current_line\n            current_line = []\n\n    if current_line:\n        nltok = copy.copy(current_line[-1])\n        nltok.type = self.t_NEWLINE\n        nltok.value = &#39;\\n&#39;\n        current_line.append(nltok)\n        yield current_line</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.Preprocessor.include\"><code class=\"name flex\">\n<span>def <span class=\"ident\">include</span></span>(<span>self, tokens)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Implementation of file-inclusion</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def include(self,tokens):\n    &#34;&#34;&#34;Implementation of file-inclusion&#34;&#34;&#34;\n    # Try to extract the filename and then process an include file\n    if not tokens:\n        return\n    if tokens:\n        if tokens[0].value != &#39;&lt;&#39; and tokens[0].type != self.t_STRING:\n            tokens = self.tokenstrip(self.expand_macros(tokens))\n\n        is_system_include = False\n        if tokens[0].value == &#39;&lt;&#39;:\n            is_system_include = True\n         
   # Include &lt;...&gt;\n            i = 1\n            while i &lt; len(tokens):\n                if tokens[i].value == &#39;&gt;&#39;:\n                    break\n                i += 1\n            else:\n                self.on_error(tokens[0].source,tokens[0].lineno,&#34;Malformed #include &lt;...&gt;&#34;)\n                return\n            filename = &#34;&#34;.join([x.value for x in tokens[1:i]])\n            # Search only formally specified paths\n            path = self.path\n        elif tokens[0].type == self.t_STRING:\n            filename = tokens[0].value[1:-1]\n            # Search from each nested include file, as well as formally specified paths\n            path = self.temp_path + self.path\n        else:\n            p = self.on_include_not_found(True,False,self.temp_path[0] if self.temp_path else &#39;&#39;,tokens[0].value)\n            assert p is None\n            return\n    if not path:\n        path = [&#39;&#39;]\n    while True:\n        #print path\n        for p in path:\n            iname = os.path.join(p,filename)\n            fulliname = os.path.abspath(iname)\n            if fulliname in self.include_once:\n                if self.debugout is not None:\n                    print(&#34;x:x:x x:x #include \\&#34;%s\\&#34; skipped as already seen&#34; % (fulliname), file = self.debugout)\n                return\n            try:\n                ih = self.on_file_open(is_system_include,fulliname)\n                data = ih.read()\n                ih.close()\n                dname = os.path.dirname(fulliname)\n                if dname:\n                    self.temp_path.insert(0,dname)\n                for tok in self.parsegen(data,filename,fulliname):\n                    yield tok\n                if dname:\n                    del self.temp_path[0]\n                return\n            except IOError:\n                pass\n        else:\n            p = self.on_include_not_found(False,is_system_include,self.temp_path[0] if 
self.temp_path else &#39;&#39;,filename)\n            assert p is not None\n            path.append(p)</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.Preprocessor.macro_expand_args\"><code class=\"name flex\">\n<span>def <span class=\"ident\">macro_expand_args</span></span>(<span>self, macro, args)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Given a Macro and list of arguments (each a token list), this method\n                                        returns an expanded version of a macro.\n                                        The return value is a token sequence\n                                        representing the replacement macro tokens</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def macro_expand_args(self,macro,args):\n    &#34;&#34;&#34;Given a Macro and list of arguments (each a token list), this method\n    returns an expanded version of a macro.  The return value is a token sequence\n    representing the replacement macro tokens&#34;&#34;&#34;\n    # Make a copy of the macro token sequence\n    rep = [copy.copy(_x) for _x in macro.value]\n\n    # Make string expansion patches.  
These do not alter the length of the replacement sequence\n    str_expansion = {}\n    for argnum, i in macro.str_patch:\n        if argnum not in str_expansion:\n            # Strip all non-space whitespace before stringization\n            tokens = copy.copy(args[argnum])\n            for j in xrange(len(tokens)):\n                if tokens[j].type in self.t_WS and tokens[j].type != self.t_LINECONT:\n                    tokens[j].value = &#39; &#39;\n            # Collapse all multiple whitespace too\n            j = 0\n            while j &lt; len(tokens) - 1:\n                if tokens[j].type in self.t_WS and tokens[j+1].type in self.t_WS:\n                    del tokens[j+1]\n                else:\n                    j += 1\n            str = &#34;&#34;.join([x.value for x in tokens])\n            str = str.replace(&#34;\\\\&#34;,&#34;\\\\\\\\&#34;).replace(&#39;&#34;&#39;, &#39;\\\\&#34;&#39;)\n            str_expansion[argnum] = &#39;&#34;&#39; + str + &#39;&#34;&#39;\n        rep[i] = copy.copy(rep[i])\n        rep[i].value = str_expansion[argnum]\n\n    # Make the variadic macro comma patch.  If the variadic macro argument is empty, we get rid\n    comma_patch = False\n    if macro.variadic and not args[-1]:\n        for i in macro.var_comma_patch:\n            rep[i] = None\n            comma_patch = True\n\n    # Make all other patches.   The order of these matters.  It is assumed that the patch list\n    # has been sorted in reverse order of patch location since replacements will cause the\n    # size of the replacement sequence to expand from the patch point.\n    \n    expanded = { }\n    #print(&#34;***&#34;, macro)\n    #print(macro.patch)\n    for ptype, argnum, i in macro.patch:\n        #print([x.value for x in rep])\n        # Concatenation.   Argument is left unexpanded\n        if ptype == &#39;t&#39;:\n            rep[i:i+1] = args[argnum]\n        # Normal expansion.  
Argument is macro expanded first\n        elif ptype == &#39;e&#39;:\n            #print(&#39;*** Function macro arg&#39;, rep[i], &#39;replace with&#39;, args[argnum], &#39;which expands into&#39;, self.expand_macros(copy.copy(args[argnum])))\n            if argnum not in expanded:\n                expanded[argnum] = self.expand_macros(copy.copy(args[argnum]))\n            rep[i:i+1] = expanded[argnum]\n\n    # Get rid of removed comma if necessary\n    if comma_patch:\n        rep = [_i for _i in rep if _i]\n        \n    # Do a token concatenation pass, stitching any tokens separated by ## into a single token\n    while len(rep) and rep[0].type == self.t_DPOUND:\n        del rep[0]\n    while len(rep) and rep[-1].type == self.t_DPOUND:\n        del rep[-1]\n    i = 1\n    stitched = False\n    while i &lt; len(rep) - 1:\n        if rep[i].type == self.t_DPOUND:\n            j = i + 1\n            while rep[j].type == self.t_DPOUND:\n                j += 1\n            rep[i-1] = copy.copy(rep[i-1])\n            rep[i-1].type = None\n            rep[i-1].value += rep[j].value\n            while j &gt;= i:\n                del rep[i]\n                j -= 1\n            stitched = True\n        else:\n            i += 1\n    if stitched:\n        # Stitched tokens will have unknown type, so figure those out now\n        i = 0\n        lex = self.lexer.clone()\n        while i &lt; len(rep):\n            if rep[i].type is None:\n                lex.input(rep[i].value)\n                toks = []\n                while True:\n                    tok = lex.token()\n                    if not tok:\n                        break\n                    toks.append(tok)\n                if len(toks) != 1:\n                    # Split it once again\n                    while len(toks) &gt; 1:\n                        rep.insert(i+1, copy.copy(rep[i]))\n                        rep[i+1].value = toks[-1].value\n                        rep[i+1].type = toks[-1].type\n             
           toks.pop()\n                    rep[i].value = toks[0].value\n                    rep[i].type = toks[0].type\n                else:\n                    rep[i].type = toks[0].type\n            i += 1\n\n    #print rep\n    return rep</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.Preprocessor.macro_prescan\"><code class=\"name flex\">\n<span>def <span class=\"ident\">macro_prescan</span></span>(<span>self, macro)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Examine the macro value (token sequence) and identify patch points\n                                        This is used to speed up macro expansion later on&mdash;we'll know\n                                        right away where to apply patches to the value to form the expansion</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def macro_prescan(self,macro):\n    &#34;&#34;&#34;Examine the macro value (token sequence) and identify patch points\n    This is used to speed up macro expansion later on---we&#39;ll know\n    right away where to apply patches to the value to form the expansion&#34;&#34;&#34;\n    macro.patch     = []             # Standard macro arguments \n    macro.str_patch = []             # String conversion expansion\n    macro.var_comma_patch = []       # Variadic macro comma patch\n    i = 0\n    #print(&#34;BEFORE&#34;, macro.value)\n    #print(&#34;BEFORE&#34;, [x.value for x in macro.value])\n    while i &lt; len(macro.value):\n        if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:\n            argnum = macro.arglist.index(macro.value[i].value)\n       
     # Conversion of argument to a string\n            j = i - 1\n            while j &gt;= 0 and macro.value[j].type in self.t_WS:\n                j -= 1\n            if j &gt;= 0 and macro.value[j].value == &#39;#&#39;:\n                macro.value[i] = copy.copy(macro.value[i])\n                macro.value[i].type = self.t_STRING\n                while i &gt; j:\n                    del macro.value[j]\n                    i -= 1\n                macro.str_patch.append((argnum,i))\n                continue\n            # Concatenation\n            elif (i &gt; 0 and macro.value[i-1].value == &#39;##&#39;):\n                macro.patch.append((&#39;t&#39;,argnum,i))\n                i += 1\n                continue\n            elif ((i+1) &lt; len(macro.value) and macro.value[i+1].value == &#39;##&#39;):\n                macro.patch.append((&#39;t&#39;,argnum,i))\n                i += 1\n                continue\n            # Standard expansion\n            else:\n                macro.patch.append((&#39;e&#39;,argnum,i))\n        elif macro.value[i].value == &#39;##&#39;:\n            if macro.variadic and (i &gt; 0) and (macro.value[i-1].value == &#39;,&#39;) and \\\n                    ((i+1) &lt; len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \\\n                    (macro.value[i+1].value == macro.vararg):\n                macro.var_comma_patch.append(i-1)\n        i += 1\n    macro.patch.sort(key=lambda x: x[2],reverse=True)</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.Preprocessor.parse\"><code class=\"name flex\">\n<span>def <span class=\"ident\">parse</span></span>(<span>self, input, source=None, ignore={})</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Parse input text.</p>\n                                </section>\n             
                   <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def parse(self,input,source=None,ignore={}):\n    &#34;&#34;&#34;Parse input text.&#34;&#34;&#34;\n    if isinstance(input, FILE_TYPES):\n        if source is None:\n            source = input.name\n        input = input.read()\n    self.ignore = ignore\n    self.parser = self.parsegen(input,source,os.path.abspath(source) if source else None)\n    if source is not None:\n        dname = os.path.dirname(source)\n        self.temp_path.insert(0,dname)</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.Preprocessor.parsegen\"><code class=\"name flex\">\n<span>def <span class=\"ident\">parsegen</span></span>(<span>self, input, source=None, abssource=None)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Parse an input string</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def parsegen(self,input,source=None,abssource=None):\n    &#34;&#34;&#34;Parse an input string&#34;&#34;&#34;\n    rewritten_source = source\n    if abssource:\n        rewritten_source = abssource\n        for rewrite in self.rewrite_paths:\n            temp = re.sub(rewrite[0], rewrite[1], rewritten_source)\n            if temp != abssource:\n                rewritten_source = temp\n                if os.sep != &#39;/&#39;:\n                    rewritten_source = rewritten_source.replace(os.sep, &#39;/&#39;)\n                break\n\n    # Replace trigraph sequences\n    t = trigraph(input)\n    lines = self.group_lines(t, 
rewritten_source)\n\n    if not source:\n        source = &#34;&#34;\n    if not rewritten_source:\n        rewritten_source = &#34;&#34;\n        \n    my_include_times_idx = len(self.include_times)\n    self.include_times.append(FileInclusionTime(self.macros[&#39;__FILE__&#39;] if &#39;__FILE__&#39; in self.macros else None, source, abssource, self.include_depth))\n    self.include_depth += 1\n    my_include_time_begin = clock()\n    if self.expand_filemacro:\n        self.define(&#34;__FILE__ \\&#34;%s\\&#34;&#34; % rewritten_source)\n\n    self.source = abssource\n    chunk = []\n    enable = True\n    iftrigger = False\n    ifpassthru = False\n    class ifstackentry(object):\n        def __init__(self,enable,iftrigger,ifpassthru,startlinetoks):\n            self.enable = enable\n            self.iftrigger = iftrigger\n            self.ifpassthru = ifpassthru\n            self.rewritten = False\n            self.startlinetoks = startlinetoks\n    ifstack = []\n    # True until any non-whitespace output or anything with effects happens.\n    at_front_of_file = True\n    # True if auto pragma once still a possibility for this #include\n    auto_pragma_once_possible = self.auto_pragma_once_enabled\n    # =(MACRO, 0) means #ifndef MACRO or #if !defined(MACRO) seen, =(MACRO,1) means #define MACRO seen\n    include_guard = None\n    self.on_potential_include_guard(None)\n\n    for x in lines:\n        all_whitespace = True\n        skip_auto_pragma_once_possible_check = False\n        # Handle comments\n        for i,tok in enumerate(x):\n            if tok.type in self.t_COMMENT:\n                if not self.on_comment(tok):\n                    if tok.type == self.t_COMMENT1:\n                        tok.value = &#39; &#39;\n                    elif tok.type == self.t_COMMENT2:\n                        tok.value = &#39;\\n&#39;\n                    tok.type = &#39;CPP_WS&#39;\n        # Skip over whitespace\n        for i,tok in enumerate(x):\n            if 
tok.type not in self.t_WS and tok.type not in self.t_COMMENT:\n                all_whitespace = False\n                break\n        output_and_expand_line = True\n        output_unexpanded_line = False\n        if tok.value == &#39;#&#39;:\n            precedingtoks = [ tok ]\n            output_and_expand_line = False\n            try:\n                # Preprocessor directive      \n                i += 1\n                while i &lt; len(x) and x[i].type in self.t_WS:\n                    precedingtoks.append(x[i])\n                    i += 1                    \n                dirtokens = self.tokenstrip(x[i:])\n                if dirtokens:\n                    name = dirtokens[0].value\n                    args = self.tokenstrip(dirtokens[1:])\n                \n                    if self.debugout is not None:\n                        print(&#34;%d:%d:%d %s:%d #%s %s&#34; % (enable, iftrigger, ifpassthru, dirtokens[0].source, dirtokens[0].lineno, dirtokens[0].value, &#34;&#34;.join([tok.value for tok in args])), file = self.debugout)\n                        #print(ifstack)\n\n                    handling = self.on_directive_handle(dirtokens[0],args,ifpassthru,precedingtoks)\n                    assert handling == True or handling == None\n                else:\n                    name = &#34;&#34;\n                    args = []\n                    raise OutputDirective(Action.IgnoreAndRemove)\n                    \n                if name == &#39;define&#39;:\n                    at_front_of_file = False\n                    if enable:\n                        for tok in self.expand_macros(chunk):\n                            yield tok\n                        chunk = []\n                        if include_guard and include_guard[1] == 0:\n                            if include_guard[0] == args[0].value and len(args) == 1:\n                                include_guard = (args[0].value, 1)\n                                # If ifpassthru is only turned 
on due to this include guard, turn it off\n                                if ifpassthru and not ifstack[-1].ifpassthru:\n                                    ifpassthru = False\n                        self.define(args)\n                        if self.debugout is not None:\n                            print(&#34;%d:%d:%d %s:%d      %s&#34; % (enable, iftrigger, ifpassthru, dirtokens[0].source, dirtokens[0].lineno, repr(self.macros[args[0].value])), file = self.debugout)\n                        if handling is None:\n                            for tok in x:\n                                yield tok\n                elif name == &#39;include&#39;:\n                    if enable:\n                        for tok in self.expand_macros(chunk):\n                            yield tok\n                        chunk = []\n                        oldfile = self.macros[&#39;__FILE__&#39;] if &#39;__FILE__&#39; in self.macros else None\n                        if args and args[0].value != &#39;&lt;&#39; and args[0].type != self.t_STRING:\n                            args = self.tokenstrip(self.expand_macros(args))\n                        #print(&#39;***&#39;, &#39;&#39;.join([x.value for x in args]), file = sys.stderr)\n                        if self.passthru_includes is not None and self.passthru_includes.match(&#39;&#39;.join([x.value for x in args])):\n                            for tok in precedingtoks:\n                                yield tok\n                            for tok in dirtokens:\n                                yield tok\n                            for tok in self.include(args):\n                                pass\n                        else:\n                            for tok in self.include(args):\n                                yield tok\n                        if oldfile is not None:\n                            self.macros[&#39;__FILE__&#39;] = oldfile\n                        self.source = abssource\n                elif name == 
&#39;undef&#39;:\n                    at_front_of_file = False\n                    if enable:\n                        for tok in self.expand_macros(chunk):\n                            yield tok\n                        chunk = []\n                        self.undef(args)\n                        if handling is None:\n                            for tok in x:\n                                yield tok\n                elif name == &#39;ifdef&#39;:\n                    at_front_of_file = False\n                    ifstack.append(ifstackentry(enable,iftrigger,ifpassthru,x))\n                    if enable:\n                        ifpassthru = False\n                        if not args[0].value in self.macros:\n                            res = self.on_unknown_macro_in_defined_expr(args[0])\n                            if res is None:\n                                ifpassthru = True\n                                ifstack[-1].rewritten = True\n                                raise OutputDirective(Action.IgnoreAndPassThrough)\n                            elif res is True:\n                                iftrigger = True\n                            else:\n                                enable = False\n                                iftrigger = False\n                        else:\n                            iftrigger = True\n                elif name == &#39;ifndef&#39;:\n                    if not ifstack and at_front_of_file:\n                        self.on_potential_include_guard(args[0].value)\n                        include_guard = (args[0].value, 0)\n                    at_front_of_file = False\n                    ifstack.append(ifstackentry(enable,iftrigger,ifpassthru,x))\n                    if enable:\n                        ifpassthru = False\n                        if args[0].value in self.macros:\n                            enable = False\n                            iftrigger = False\n                        else:\n                           
 res = self.on_unknown_macro_in_defined_expr(args[0])\n                            if res is None:\n                                ifpassthru = True\n                                ifstack[-1].rewritten = True\n                                raise OutputDirective(Action.IgnoreAndPassThrough)\n                            elif res is True:\n                                enable = False\n                                iftrigger = False\n                            else:\n                                iftrigger = True\n                elif name == &#39;if&#39;:\n                    if not ifstack and at_front_of_file:\n                        if args[0].value == &#39;!&#39; and args[1].value == &#39;defined&#39;:\n                            n = 2\n                            if args[n].value == &#39;(&#39;: n += 1\n                            self.on_potential_include_guard(args[n].value)\n                            include_guard = (args[n].value, 0)\n                    at_front_of_file = False\n                    ifstack.append(ifstackentry(enable,iftrigger,ifpassthru,x))\n                    if enable:\n                        iftrigger = False\n                        ifpassthru = False\n                        result, rewritten = self.evalexpr(args)\n                        if rewritten is not None:\n                            x = x[:i+2] + rewritten + [x[-1]]\n                            x[i+1] = copy.copy(x[i+1])\n                            x[i+1].type = self.t_SPACE\n                            x[i+1].value = &#39; &#39;\n                            ifpassthru = True\n                            ifstack[-1].rewritten = True\n                            raise OutputDirective(Action.IgnoreAndPassThrough)\n                        if not result:\n                            enable = False\n                        else:\n                            iftrigger = True\n                elif name == &#39;elif&#39;:\n                    at_front_of_file = 
False\n                    if ifstack:\n                        if ifstack[-1].enable:     # We only pay attention if outer &#34;if&#34; allows this\n                            if enable and not ifpassthru:         # If already true, we flip enable False\n                                enable = False\n                            elif not iftrigger:   # If False, but not triggered yet, we&#39;ll check expression\n                                result, rewritten = self.evalexpr(args)\n                                if rewritten is not None:\n                                    enable = True\n                                    if not ifpassthru:\n                                        # This is a passthru #elif after a False #if, so convert to an #if\n                                        x[i].value = &#39;if&#39;\n                                    x = x[:i+2] + rewritten + [x[-1]]\n                                    x[i+1] = copy.copy(x[i+1])\n                                    x[i+1].type = self.t_SPACE\n                                    x[i+1].value = &#39; &#39;\n                                    ifpassthru = True\n                                    ifstack[-1].rewritten = True\n                                    raise OutputDirective(Action.IgnoreAndPassThrough)\n                                if ifpassthru:\n                                    # If this elif can only ever be true, simulate that\n                                    if result:\n                                        newtok = copy.copy(x[i+3])\n                                        newtok.type = self.t_INTEGER\n                                        newtok.value = self.t_INTEGER_TYPE(result)\n                                        x = x[:i+2] + [newtok] + [x[-1]]\n                                        raise OutputDirective(Action.IgnoreAndPassThrough)\n                                    # Otherwise elide\n                                    enable = False\n                
                elif result:\n                                    enable  = True\n                                    iftrigger = True\n                    else:\n                        self.on_error(dirtokens[0].source,dirtokens[0].lineno,&#34;Misplaced #elif&#34;)\n                        \n                elif name == &#39;else&#39;:\n                    at_front_of_file = False\n                    if ifstack:\n                        if ifstack[-1].enable:\n                            if ifpassthru:\n                                enable = True\n                                raise OutputDirective(Action.IgnoreAndPassThrough)\n                            if enable:\n                                enable = False\n                            elif not iftrigger:\n                                enable = True\n                                iftrigger = True\n                    else:\n                        self.on_error(dirtokens[0].source,dirtokens[0].lineno,&#34;Misplaced #else&#34;)\n\n                elif name == &#39;endif&#39;:\n                    at_front_of_file = False\n                    if ifstack:\n                        oldifstackentry = ifstack.pop()\n                        enable = oldifstackentry.enable\n                        iftrigger = oldifstackentry.iftrigger\n                        ifpassthru = oldifstackentry.ifpassthru\n                        if self.debugout is not None:\n                            print(&#34;%d:%d:%d %s:%d      (%s:%d %s)&#34; % (enable, iftrigger, ifpassthru, dirtokens[0].source, dirtokens[0].lineno,\n                                oldifstackentry.startlinetoks[0].source, oldifstackentry.startlinetoks[0].lineno, &#34;&#34;.join([n.value for n in oldifstackentry.startlinetoks])), file = self.debugout)\n                        skip_auto_pragma_once_possible_check = True\n                        if oldifstackentry.rewritten:\n                            raise OutputDirective(Action.IgnoreAndPassThrough)\n    
                else:\n                        self.on_error(dirtokens[0].source,dirtokens[0].lineno,&#34;Misplaced #endif&#34;)\n                elif name == &#39;pragma&#39; and args[0].value == &#39;once&#39;:\n                    if enable:\n                        self.include_once[self.source] = None\n                elif enable:\n                    # Unknown preprocessor directive\n                    output_unexpanded_line = (self.on_directive_unknown(dirtokens[0], args, ifpassthru, precedingtoks) is None)\n\n            except OutputDirective as e:\n                if e.action == Action.IgnoreAndPassThrough:\n                    output_unexpanded_line = True\n                elif e.action == Action.IgnoreAndRemove:\n                    pass\n                else:\n                    assert False\n\n        # If there is ever any non-whitespace output outside an include guard, auto pragma once is not possible\n        if not skip_auto_pragma_once_possible_check and auto_pragma_once_possible and not ifstack and not all_whitespace:\n            auto_pragma_once_possible = False\n            if self.debugout is not None:\n                print(&#34;%d:%d:%d %s:%d Determined that #include \\&#34;%s\\&#34; is not entirely wrapped in an include guard macro, disabling auto-applying #pragma once&#34; % (enable, iftrigger, ifpassthru, x[0].source, x[0].lineno, self.source), file = self.debugout)\n            \n        if output_and_expand_line or output_unexpanded_line:\n            if not all_whitespace:\n                at_front_of_file = False\n\n            # Normal text\n            if enable:\n                if output_and_expand_line:\n                    chunk.extend(x)\n                elif output_unexpanded_line:\n                    for tok in self.expand_macros(chunk):\n                        yield tok\n                    chunk = []\n                    for tok in x:\n                        yield tok\n            else:\n                # Need to 
extend with the same number of blank lines\n                i = 0\n                while i &lt; len(x):\n                    if x[i].type not in self.t_WS:\n                        del x[i]\n                    else:\n                        i += 1\n                chunk.extend(x)\n\n    for tok in self.expand_macros(chunk):\n        yield tok\n    chunk = []\n    for i in ifstack:\n        self.on_error(i.startlinetoks[0].source, i.startlinetoks[0].lineno, &#34;Unterminated &#34; + &#34;&#34;.join([n.value for n in i.startlinetoks]))\n    if auto_pragma_once_possible and include_guard and include_guard[1] == 1:\n        if self.debugout is not None:\n            print(&#34;%d:%d:%d %s:%d Determined that #include \\&#34;%s\\&#34; is entirely wrapped in an include guard macro called %s, auto-applying #pragma once&#34; % (enable, iftrigger, ifpassthru, self.source, 0, self.source, include_guard[0]), file = self.debugout)\n        self.include_once[self.source] = include_guard[0]\n    elif self.auto_pragma_once_enabled and self.source not in self.include_once:\n        if self.debugout is not None:\n            print(&#34;%d:%d:%d %s:%d Did not auto apply #pragma once to this file due to auto_pragma_once_possible=%d, include_guard=%s&#34; % (enable, iftrigger, ifpassthru, self.source, 0, auto_pragma_once_possible, repr(include_guard)), file = self.debugout)\n    my_include_time_end = clock()\n    self.include_times[my_include_times_idx].elapsed = my_include_time_end - my_include_time_begin\n    self.include_depth -= 1</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.Preprocessor.token\"><code class=\"name flex\">\n<span>def <span class=\"ident\">token</span></span>(<span>self)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Method to return individual tokens</p>\n     
                           </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def token(self):\n    &#34;&#34;&#34;Method to return individual tokens&#34;&#34;&#34;\n    try:\n        while True:\n            tok = next(self.parser)\n            if tok.type not in self.ignore:\n                return tok\n    except StopIteration:\n        self.parser = None\n        return None</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.Preprocessor.tokenize\"><code class=\"name flex\">\n<span>def <span class=\"ident\">tokenize</span></span>(<span>self, text)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Utility function. Given a string of text, tokenize into a list of tokens</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def tokenize(self,text):\n    &#34;&#34;&#34;Utility function. 
Given a string of text, tokenize into a list of tokens&#34;&#34;&#34;\n    tokens = []\n    self.lexer.input(text)\n    while True:\n        tok = self.lexer.token()\n        if not tok: break\n        tok.source = &#39;&#39;\n        tokens.append(tok)\n    return tokens</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.Preprocessor.tokenstrip\"><code class=\"name flex\">\n<span>def <span class=\"ident\">tokenstrip</span></span>(<span>self, tokens)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Remove leading/trailing whitespace tokens from a token list</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def tokenstrip(self,tokens):\n    &#34;&#34;&#34;Remove leading/trailing whitespace tokens from a token list&#34;&#34;&#34;\n    i = 0\n    while i &lt; len(tokens) and tokens[i].type in self.t_WS:\n        i += 1\n    del tokens[:i]\n    i = len(tokens)-1\n    while i &gt;= 0 and tokens[i].type in self.t_WS:\n        i -= 1\n    del tokens[i+1:]\n    return tokens</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.Preprocessor.undef\"><code class=\"name flex\">\n<span>def <span class=\"ident\">undef</span></span>(<span>self, tokens)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Undefine a macro</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n          
                          <pre><code class=\"python\">def undef(self,tokens):\n    &#34;&#34;&#34;Undefine a macro&#34;&#34;&#34;\n    if isinstance(tokens,STRING_TYPES):\n        tokens = self.tokenize(tokens)\n    id = tokens[0].value\n    try:\n        del self.macros[id]\n    except LookupError:\n        pass</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.Preprocessor.write\"><code class=\"name flex\">\n<span>def <span class=\"ident\">write</span></span>(<span>self, oh=&lt;_io.TextIOWrapper name=&#39;&lt;stdout&gt;&#39; mode=&#39;w&#39; encoding=&#39;utf-8&#39;&gt;)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Calls token() repeatedly, expanding tokens to their text and writing to the file\n                                        like stream oh</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def write(self, oh=sys.stdout):\n    &#34;&#34;&#34;Calls token() repeatedly, expanding tokens to their text and writing to the file like stream oh&#34;&#34;&#34;\n    lastlineno = 0\n    lastsource = None\n    done = False\n    blanklines = 0\n    while not done:\n        emitlinedirective = False\n        toks = []\n        all_ws = True\n        # Accumulate a line\n        while not done:\n            tok = self.token()\n            if not tok:\n                done = True\n                break\n            toks.append(tok)\n            if tok.value and tok.value[0] == &#39;\\n&#39;:\n                break\n            if tok.type not in self.t_WS:\n                all_ws = False\n        if not toks:\n            break\n        if all_ws:\n            # Remove preceding 
whitespace so it becomes just a LF\n            if len(toks) &gt; 1:\n                tok = toks[-1]\n                toks = [ tok ]\n            blanklines += toks[0].value.count(&#39;\\n&#39;)\n            continue\n        # Filter out line continuations, collapsing before and after if needs be\n        for n in xrange(len(toks)-1, -1, -1):\n            if toks[n].type in self.t_LINECONT:\n                if n &gt; 0 and n &lt; len(toks) - 1 and toks[n-1].type in self.t_WS and toks[n+1].type in self.t_WS:\n                    toks[n-1].value = toks[n-1].value[0]\n                    del toks[n:n+2]\n                else:\n                    del toks[n]\n        # The line in toks is not all whitespace\n        emitlinedirective = (blanklines &gt; 6) and self.line_directive is not None\n        if hasattr(toks[0], &#39;source&#39;):\n            if lastsource is None:\n                if toks[0].source is not None:\n                    emitlinedirective = True\n                lastsource = toks[0].source\n            elif lastsource != toks[0].source:\n                emitlinedirective = True\n                lastsource = toks[0].source\n        # Replace consecutive whitespace in output with a single space except at any indent\n        first_ws = None\n        #print(toks)\n        for n in xrange(len(toks)-1, -1, -1):\n            tok = toks[n]\n            if first_ws is None:\n                if tok.type in self.t_SPACE or len(tok.value) == 0:\n                    first_ws = n\n            else:\n                if tok.type not in self.t_SPACE and len(tok.value) &gt; 0:\n                    m = n + 1\n                    while m != first_ws:\n                        del toks[m]\n                        first_ws -= 1\n                    first_ws = None\n                    if self.compress &gt; 0:\n                        # Collapse a token of many whitespace into single\n                        if toks[m].value and toks[m].value[0] == &#39; &#39;:\n         
                   toks[m].value = &#39; &#39;\n        if not self.compress &gt; 1 and not emitlinedirective:\n            newlinesneeded = toks[0].lineno - lastlineno - 1\n            if newlinesneeded &gt; 6 and self.line_directive is not None:\n                emitlinedirective = True\n            else:\n                while newlinesneeded &gt; 0:\n                    oh.write(&#39;\\n&#39;)\n                    newlinesneeded -= 1\n        lastlineno = toks[0].lineno\n        # Account for those newlines in a multiline comment\n        if emitlinedirective and self.line_directive is not None:\n            oh.write(self.line_directive + &#39; &#39; + str(lastlineno) + (&#39;&#39; if lastsource is None else (&#39; &#34;&#39; + lastsource + &#39;&#34;&#39; )) + &#39;\\n&#39;)\n        for tok in toks:\n            if tok.type == self.t_COMMENT1:\n                lastlineno += tok.value.count(&#39;\\n&#39;)\n        blanklines = 0\n        #print toks[0].lineno, \n        for tok in toks:\n            #print tok.value,\n            oh.write(tok.value)</code></pre>\n                                </details>\n                            </dd>\n                        </dl>\n                        <h3>Inherited members</h3>\n                        <ul class=\"hlist\">\n                            <li><code><b><a title=\"pcpp.parser.PreprocessorHooks\" href=\"parser.html#pcpp.parser.PreprocessorHooks\">PreprocessorHooks</a></b></code>:\n                                <ul class=\"hlist\">\n                                    <li><code><a title=\"pcpp.parser.PreprocessorHooks.__init__\" href=\"parser.html#pcpp.parser.PreprocessorHooks.__init__\">__init__</a></code>\n                                    </li>\n                                    <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_comment\" href=\"parser.html#pcpp.parser.PreprocessorHooks.on_comment\">on_comment</a></code>\n                                    </li>\n                                  
  <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_directive_handle\" href=\"parser.html#pcpp.parser.PreprocessorHooks.on_directive_handle\">on_directive_handle</a></code>\n                                    </li>\n                                    <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_directive_unknown\" href=\"parser.html#pcpp.parser.PreprocessorHooks.on_directive_unknown\">on_directive_unknown</a></code>\n                                    </li>\n                                    <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_error\" href=\"parser.html#pcpp.parser.PreprocessorHooks.on_error\">on_error</a></code>\n                                    </li>\n                                    <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_file_open\" href=\"parser.html#pcpp.parser.PreprocessorHooks.on_file_open\">on_file_open</a></code>\n                                    </li>\n                                    <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_include_not_found\" href=\"parser.html#pcpp.parser.PreprocessorHooks.on_include_not_found\">on_include_not_found</a></code>\n                                    </li>\n                                    <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_potential_include_guard\" href=\"parser.html#pcpp.parser.PreprocessorHooks.on_potential_include_guard\">on_potential_include_guard</a></code>\n                                    </li>\n                                    <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_unknown_macro_function_in_expr\" href=\"parser.html#pcpp.parser.PreprocessorHooks.on_unknown_macro_function_in_expr\">on_unknown_macro_function_in_expr</a></code>\n                                    </li>\n                                    <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_unknown_macro_in_defined_expr\" 
href=\"parser.html#pcpp.parser.PreprocessorHooks.on_unknown_macro_in_defined_expr\">on_unknown_macro_in_defined_expr</a></code>\n                                    </li>\n                                    <li><code><a title=\"pcpp.parser.PreprocessorHooks.on_unknown_macro_in_expr\" href=\"parser.html#pcpp.parser.PreprocessorHooks.on_unknown_macro_in_expr\">on_unknown_macro_in_expr</a></code>\n                                    </li>\n                                </ul>\n                            </li>\n                        </ul>\n                    </dd>\n                    <dt id=\"pcpp.preprocessor.PreprocessorHooks\"><code class=\"flex name class\">\n<span>class <span class=\"ident\">PreprocessorHooks</span></span>\n</code></dt>\n                    <dd>\n                        <section class=\"desc\">\n                            <p>Override these in your subclass of Preprocessor to customise preprocessing</p>\n                        </section>\n                        <details class=\"source\">\n                            <summary>Source code</summary>\n                            <pre><code class=\"python\">class PreprocessorHooks(object):\n    &#34;&#34;&#34;Override these in your subclass of Preprocessor to customise preprocessing&#34;&#34;&#34;\n    def __init__(self):\n        self.lastdirective = None\n\n    def on_error(self,file,line,msg):\n        &#34;&#34;&#34;Called when the preprocessor has encountered an error, e.g. 
malformed input.\n        \n        The default simply prints to stderr and increments the return code.\n        &#34;&#34;&#34;\n        print(&#34;%s:%d error: %s&#34; % (file,line,msg), file = sys.stderr)\n        self.return_code += 1\n        \n    def on_file_open(self,is_system_include,includepath):\n        &#34;&#34;&#34;Called to open a file for reading.\n        \n        This hook provides the ability to use ``chardet``, or any other mechanism,\n        to inspect a file for its text encoding, and open it appropriately. Be\n        aware that this function is used to probe for possible include file locations,\n        so ``includepath`` may not exist. If it does not, raise the appropriate\n        ``IOError`` exception.\n        \n        The default calls ``io.open(includepath, &#39;r&#39;, encoding = self.assume_encoding)``,\n        examines if it starts with a BOM (if so, it removes it), and returns the file\n        object opened. This raises the appropriate exception if the path was not found.\n        &#34;&#34;&#34;\n        if sys.version_info.major &lt; 3:\n            assert self.assume_encoding is None\n            ret = open(includepath, &#39;r&#39;)\n        else:\n            ret = open(includepath, &#39;r&#39;, encoding = self.assume_encoding)\n        bom = ret.read(1)\n        #print(repr(bom))\n        if bom != &#39;\\ufeff&#39;:\n            ret.seek(0)\n        return ret\n\n    def on_include_not_found(self,is_malformed,is_system_include,curdir,includepath):\n        &#34;&#34;&#34;Called when a #include wasn&#39;t found.\n        \n        Raise OutputDirective to pass through or remove, else return\n        a suitable path. 
Remember that Preprocessor.add_path() lets you add search paths.\n        \n        The default calls ``self.on_error()`` with a suitable error message about the\n        include file not found if ``is_malformed`` is False, else a suitable error\n        message about a malformed #include, and in both cases raises OutputDirective\n        (pass through).\n        &#34;&#34;&#34;\n        if is_malformed:\n            self.on_error(self.lastdirective.source,self.lastdirective.lineno, &#34;Malformed #include statement: %s&#34; % includepath)\n        else:\n            self.on_error(self.lastdirective.source,self.lastdirective.lineno, &#34;Include file &#39;%s&#39; not found&#34; % includepath)\n        raise OutputDirective(Action.IgnoreAndPassThrough)\n        \n    def on_unknown_macro_in_defined_expr(self,tok):\n        &#34;&#34;&#34;Called when an expression passed to an #if contained a defined operator\n        performed on something unknown.\n        \n        Return True if to treat it as defined, False if to treat it as undefined,\n        raise OutputDirective to pass through without execution, or return None to\n        pass through the mostly expanded #if expression apart from the unknown defined.\n        \n        The default returns False, as per the C standard.\n        &#34;&#34;&#34;\n        return False\n\n    def on_unknown_macro_in_expr(self,ident):\n        &#34;&#34;&#34;Called when an expression passed to an #if contained an unknown identifier.\n        \n        Return what value the expression evaluator ought to use, or return None to\n        pass through the mostly expanded #if expression.\n        \n        The default returns an integer 0, as per the C standard.\n        &#34;&#34;&#34;\n        return 0\n    \n    def on_unknown_macro_function_in_expr(self,ident):\n        &#34;&#34;&#34;Called when an expression passed to an #if contained an unknown function.\n        \n        Return a callable which will be invoked by the 
expression evaluator to\n        evaluate the input to the function, or return None to pass through the\n        mostly expanded #if expression.\n        \n        The default returns a lambda which returns integer 0, as per the C standard.\n        &#34;&#34;&#34;\n        return lambda x : 0\n    \n    def on_directive_handle(self,directive,toks,ifpassthru,precedingtoks):\n        &#34;&#34;&#34;Called when there is one of\n        \n        define, include, undef, ifdef, ifndef, if, elif, else, endif\n        \n        Return True to execute and remove from the output, raise OutputDirective\n        to pass through or remove without execution, or return None to execute\n        AND pass through to the output (this only works for #define, #undef).\n        \n        The default returns True (execute and remove from the output).\n\n        directive is the directive, toks is the tokens after the directive,\n        ifpassthru is whether we are in passthru mode, precedingtoks is the\n        tokens preceding the directive from the # token until the directive.\n        &#34;&#34;&#34;\n        self.lastdirective = directive\n        return True\n        \n    def on_directive_unknown(self,directive,toks,ifpassthru,precedingtoks):\n        &#34;&#34;&#34;Called when the preprocessor encounters a #directive it doesn&#39;t understand.\n        This is actually quite an extensive list as it currently only understands:\n        \n        define, include, undef, ifdef, ifndef, if, elif, else, endif\n        \n        Return True to remove from the output, raise OutputDirective\n        to pass through or remove, or return None to\n        pass through into the output.\n        \n        The default handles #error and #warning by printing to stderr and returning True\n        (remove from output). 
For everything else it returns None (pass through into output).\n\n        directive is the directive, toks is the tokens after the directive,\n        ifpassthru is whether we are in passthru mode, precedingtoks is the\n        tokens preceding the directive from the # token until the directive.\n        &#34;&#34;&#34;\n        if directive.value == &#39;error&#39;:\n            print(&#34;%s:%d error: %s&#34; % (directive.source,directive.lineno,&#39;&#39;.join(tok.value for tok in toks)), file = sys.stderr)\n            self.return_code += 1\n            return True\n        elif directive.value == &#39;warning&#39;:\n            print(&#34;%s:%d warning: %s&#34; % (directive.source,directive.lineno,&#39;&#39;.join(tok.value for tok in toks)), file = sys.stderr)\n            return True\n        return None\n        \n    def on_potential_include_guard(self,macro):\n        &#34;&#34;&#34;Called when the preprocessor encounters an #ifndef macro or an #if !defined(macro)\n        as the first non-whitespace thing in a file. Unlike the other hooks, macro is a string,\n        not a token.\n        &#34;&#34;&#34;\n        pass\n    \n    def on_comment(self,tok):\n        &#34;&#34;&#34;Called when the preprocessor encounters a comment token. You can modify the token\n        in place. 
You must return True to let the comment pass through, else it will be removed.\n        \n        Returning False or None modifies the token to become whitespace, becoming a single space\n        if the comment is a block comment, else a single new line if the comment is a line comment.\n        &#34;&#34;&#34;\n        return None</code></pre>\n                        </details>\n                        <h3>Subclasses</h3>\n                        <ul class=\"hlist\">\n                            <li><a title=\"pcpp.preprocessor.Preprocessor\"\n                                    href=\"#pcpp.preprocessor.Preprocessor\">Preprocessor</a></li>\n                        </ul>\n                        <h3>Methods</h3>\n                        <dl>\n                            <dt id=\"pcpp.preprocessor.PreprocessorHooks.__init__\"><code class=\"name flex\">\n<span>def <span class=\"ident\">__init__</span></span>(<span>self)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Initialize self.\n                                        See help(type(self)) for accurate signature.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def __init__(self):\n    self.lastdirective = None</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.PreprocessorHooks.on_comment\"><code class=\"name flex\">\n<span>def <span class=\"ident\">on_comment</span></span>(<span>self, tok)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called when the preprocessor encounters a comment token. 
You can modify the token\n                                        in place. You must return True to let the comment pass through, else it will be\n                                        removed.</p>\n                                    <p>Returning False or None modifies the token to become whitespace, becoming a\n                                        single space\n                                        if the comment is a block comment, else a single new line if the comment is a\n                                        line comment.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_comment(self,tok):\n    &#34;&#34;&#34;Called when the preprocessor encounters a comment token. You can modify the token\n    in place. You must return True to let the comment pass through, else it will be removed.\n    \n    Returning False or None modifies the token to become whitespace, becoming a single space\n    if the comment is a block comment, else a single new line if the comment is a line comment.\n    &#34;&#34;&#34;\n    return None</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.PreprocessorHooks.on_directive_handle\"><code class=\"name flex\">\n<span>def <span class=\"ident\">on_directive_handle</span></span>(<span>self, directive, toks, ifpassthru, precedingtoks)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called when there is one of</p>\n                                    <p>define, include, undef, ifdef, ifndef, if, elif, else, endif</p>\n                                    <p>Return True to execute and remove from the output, raise OutputDirective\n                
                        to pass through or remove without execution, or return None to execute\n                                        AND pass through to the output (this only works for #define, #undef).</p>\n                                    <p>The default returns True (execute and remove from the output).</p>\n                                    <p>directive is the directive, toks is the tokens after the directive,\n                                        ifpassthru is whether we are in passthru mode, precedingtoks is the\n                                        tokens preceding the directive from the # token until the directive.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_directive_handle(self,directive,toks,ifpassthru,precedingtoks):\n    &#34;&#34;&#34;Called when there is one of\n    \n    define, include, undef, ifdef, ifndef, if, elif, else, endif\n    \n    Return True to execute and remove from the output, raise OutputDirective\n    to pass through or remove without execution, or return None to execute\n    AND pass through to the output (this only works for #define, #undef).\n    \n    The default returns True (execute and remove from the output).\n\n    directive is the directive, toks is the tokens after the directive,\n    ifpassthru is whether we are in passthru mode, precedingtoks is the\n    tokens preceding the directive from the # token until the directive.\n    &#34;&#34;&#34;\n    self.lastdirective = directive\n    return True</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.PreprocessorHooks.on_directive_unknown\"><code class=\"name flex\">\n<span>def <span class=\"ident\">on_directive_unknown</span></span>(<span>self, directive, toks, 
ifpassthru, precedingtoks)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called when the preprocessor encounters a #directive it doesn't understand.\n                                        This is actually quite an extensive list as it currently only understands:</p>\n                                    <p>define, include, undef, ifdef, ifndef, if, elif, else, endif</p>\n                                    <p>Return True to remove from the output, raise OutputDirective\n                                        to pass through or remove, or return None to\n                                        pass through into the output.</p>\n                                    <p>The default handles #error and #warning by printing to stderr and returning True\n                                        (remove from output). For everything else it returns None (pass through into\n                                        output).</p>\n                                    <p>directive is the directive, toks is the tokens after the directive,\n                                        ifpassthru is whether we are in passthru mode, precedingtoks is the\n                                        tokens preceding the directive from the # token until the directive.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_directive_unknown(self,directive,toks,ifpassthru,precedingtoks):\n    &#34;&#34;&#34;Called when the preprocessor encounters a #directive it doesn&#39;t understand.\n    This is actually quite an extensive list as it currently only understands:\n    \n    define, include, undef, ifdef, ifndef, if, elif, else, endif\n    \n    Return True to remove from the output, raise 
OutputDirective\n    to pass through or remove, or return None to\n    pass through into the output.\n    \n    The default handles #error and #warning by printing to stderr and returning True\n    (remove from output). For everything else it returns None (pass through into output).\n\n    directive is the directive, toks is the tokens after the directive,\n    ifpassthru is whether we are in passthru mode, precedingtoks is the\n    tokens preceding the directive from the # token until the directive.\n    &#34;&#34;&#34;\n    if directive.value == &#39;error&#39;:\n        print(&#34;%s:%d error: %s&#34; % (directive.source,directive.lineno,&#39;&#39;.join(tok.value for tok in toks)), file = sys.stderr)\n        self.return_code += 1\n        return True\n    elif directive.value == &#39;warning&#39;:\n        print(&#34;%s:%d warning: %s&#34; % (directive.source,directive.lineno,&#39;&#39;.join(tok.value for tok in toks)), file = sys.stderr)\n        return True\n    return None</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.PreprocessorHooks.on_error\"><code class=\"name flex\">\n<span>def <span class=\"ident\">on_error</span></span>(<span>self, file, line, msg)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called when the preprocessor has encountered an error, e.g. malformed input.</p>\n                                    <p>The default simply prints to stderr and increments the return code.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_error(self,file,line,msg):\n    &#34;&#34;&#34;Called when the preprocessor has encountered an error, e.g. 
malformed input.\n    \n    The default simply prints to stderr and increments the return code.\n    &#34;&#34;&#34;\n    print(&#34;%s:%d error: %s&#34; % (file,line,msg), file = sys.stderr)\n    self.return_code += 1</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.PreprocessorHooks.on_file_open\"><code class=\"name flex\">\n<span>def <span class=\"ident\">on_file_open</span></span>(<span>self, is_system_include, includepath)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called to open a file for reading.</p>\n                                    <p>This hook provides the ability to use <code>chardet</code>, or any other\n                                        mechanism,\n                                        to inspect a file for its text encoding, and open it appropriately. Be\n                                        aware that this function is used to probe for possible include file locations,\n                                        so <code>includepath</code> may not exist. If it does not, raise the appropriate\n                                        <code>IOError</code> exception.\n                                    </p>\n                                    <p>The default calls\n                                        <code>io.open(includepath, 'r', encoding = self.assume_encoding)</code>,\n                                        examines if it starts with a BOM (if so, it removes it), and returns the file\n                                        object opened. 
This raises the appropriate exception if the path was not found.\n                                    </p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_file_open(self,is_system_include,includepath):\n    &#34;&#34;&#34;Called to open a file for reading.\n    \n    This hook provides the ability to use ``chardet``, or any other mechanism,\n    to inspect a file for its text encoding, and open it appropriately. Be\n    aware that this function is used to probe for possible include file locations,\n    so ``includepath`` may not exist. If it does not, raise the appropriate\n    ``IOError`` exception.\n    \n    The default calls ``io.open(includepath, &#39;r&#39;, encoding = self.assume_encoding)``,\n    examines if it starts with a BOM (if so, it removes it), and returns the file\n    object opened. 
This raises the appropriate exception if the path was not found.\n    &#34;&#34;&#34;\n    if sys.version_info.major &lt; 3:\n        assert self.assume_encoding is None\n        ret = open(includepath, &#39;r&#39;)\n    else:\n        ret = open(includepath, &#39;r&#39;, encoding = self.assume_encoding)\n    bom = ret.read(1)\n    #print(repr(bom))\n    if bom != &#39;\\ufeff&#39;:\n        ret.seek(0)\n    return ret</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.PreprocessorHooks.on_include_not_found\"><code class=\"name flex\">\n<span>def <span class=\"ident\">on_include_not_found</span></span>(<span>self, is_malformed, is_system_include, curdir, includepath)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called when a #include wasn't found.</p>\n                                    <p>Raise OutputDirective to pass through or remove, else return\n                                        a suitable path. 
Remember that Preprocessor.add_path() lets you add search\n                                        paths.</p>\n                                    <p>The default calls <code>self.on_error()</code> with a suitable error message\n                                        about the\n                                        include file not found if <code>is_malformed</code> is False, else a suitable\n                                        error\n                                        message about a malformed #include, and in both cases raises OutputDirective\n                                        (pass through).</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_include_not_found(self,is_malformed,is_system_include,curdir,includepath):\n    &#34;&#34;&#34;Called when a #include wasn&#39;t found.\n    \n    Raise OutputDirective to pass through or remove, else return\n    a suitable path. 
Remember that Preprocessor.add_path() lets you add search paths.\n    \n    The default calls ``self.on_error()`` with a suitable error message about the\n    include file not found if ``is_malformed`` is False, else a suitable error\n    message about a malformed #include, and in both cases raises OutputDirective\n    (pass through).\n    &#34;&#34;&#34;\n    if is_malformed:\n        self.on_error(self.lastdirective.source,self.lastdirective.lineno, &#34;Malformed #include statement: %s&#34; % includepath)\n    else:\n        self.on_error(self.lastdirective.source,self.lastdirective.lineno, &#34;Include file &#39;%s&#39; not found&#34; % includepath)\n    raise OutputDirective(Action.IgnoreAndPassThrough)</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.PreprocessorHooks.on_potential_include_guard\"><code\n                                    class=\"name flex\">\n<span>def <span class=\"ident\">on_potential_include_guard</span></span>(<span>self, macro)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called when the preprocessor encounters an #ifndef macro or an #if\n                                        !defined(macro)\n                                        as the first non-whitespace thing in a file. 
Unlike the other hooks, macro is a\n                                        string,\n                                        not a token.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_potential_include_guard(self,macro):\n    &#34;&#34;&#34;Called when the preprocessor encounters an #ifndef macro or an #if !defined(macro)\n    as the first non-whitespace thing in a file. Unlike the other hooks, macro is a string,\n    not a token.\n    &#34;&#34;&#34;\n    pass</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.PreprocessorHooks.on_unknown_macro_function_in_expr\"><code\n                                    class=\"name flex\">\n<span>def <span class=\"ident\">on_unknown_macro_function_in_expr</span></span>(<span>self, ident)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called when an expression passed to an #if contained an unknown function.</p>\n                                    <p>Return a callable which will be invoked by the expression evaluator to\n                                        evaluate the input to the function, or return None to pass through the\n                                        mostly expanded #if expression.</p>\n                                    <p>The default returns a lambda which returns integer 0, as per the C standard.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_unknown_macro_function_in_expr(self,ident):\n    
&#34;&#34;&#34;Called when an expression passed to an #if contained an unknown function.\n    \n    Return a callable which will be invoked by the expression evaluator to\n    evaluate the input to the function, or return None to pass through the\n    mostly expanded #if expression.\n    \n    The default returns a lambda which returns integer 0, as per the C standard.\n    &#34;&#34;&#34;\n    return lambda x : 0</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.PreprocessorHooks.on_unknown_macro_in_defined_expr\"><code\n                                    class=\"name flex\">\n<span>def <span class=\"ident\">on_unknown_macro_in_defined_expr</span></span>(<span>self, tok)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called when an expression passed to an #if contained a defined operator\n                                        performed on something unknown.</p>\n                                    <p>Return True if to treat it as defined, False if to treat it as undefined,\n                                        raise OutputDirective to pass through without execution, or return None to\n                                        pass through the mostly expanded #if expression apart from the unknown defined.\n                                    </p>\n                                    <p>The default returns False, as per the C standard.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_unknown_macro_in_defined_expr(self,tok):\n    &#34;&#34;&#34;Called when an expression passed to an #if contained a defined operator\n    performed on something unknown.\n    \n    
Return True if to treat it as defined, False if to treat it as undefined,\n    raise OutputDirective to pass through without execution, or return None to\n    pass through the mostly expanded #if expression apart from the unknown defined.\n    \n    The default returns False, as per the C standard.\n    &#34;&#34;&#34;\n    return False</code></pre>\n                                </details>\n                            </dd>\n                            <dt id=\"pcpp.preprocessor.PreprocessorHooks.on_unknown_macro_in_expr\"><code\n                                    class=\"name flex\">\n<span>def <span class=\"ident\">on_unknown_macro_in_expr</span></span>(<span>self, ident)</span>\n</code></dt>\n                            <dd>\n                                <section class=\"desc\">\n                                    <p>Called when an expression passed to an #if contained an unknown identifier.</p>\n                                    <p>Return what value the expression evaluator ought to use, or return None to\n                                        pass through the mostly expanded #if expression.</p>\n                                    <p>The default returns an integer 0, as per the C standard.</p>\n                                </section>\n                                <details class=\"source\">\n                                    <summary>Source code</summary>\n                                    <pre><code class=\"python\">def on_unknown_macro_in_expr(self,ident):\n    &#34;&#34;&#34;Called when an expression passed to an #if contained an unknown identifier.\n    \n    Return what value the expression evaluator ought to use, or return None to\n    pass through the mostly expanded #if expression.\n    \n    The default returns an integer 0, as per the C standard.\n    &#34;&#34;&#34;\n    return 0</code></pre>\n                                </details>\n                            </dd>\n                        </dl>\n                    </dd>\n 
               </dl>\n            </section>\n        </article>\n        <nav id=\"sidebar\">\n            <h1>Index</h1>\n            <div class=\"toc\">\n                <ul></ul>\n            </div>\n            <ul id=\"index\">\n                <li>\n                    <h3>Super-module</h3>\n                    <ul>\n                        <li><code><a title=\"pcpp\" href=\"index.html\">pcpp</a></code></li>\n                    </ul>\n                </li>\n                <li>\n                    <h3><a href=\"#header-classes\">Classes</a></h3>\n                    <ul>\n                        <li>\n                            <h4><code><a title=\"pcpp.preprocessor.Action\" href=\"#pcpp.preprocessor.Action\">Action</a></code>\n                            </h4>\n                            <ul class=\"\">\n                                <li><code><a title=\"pcpp.preprocessor.Action.IgnoreAndPassThrough\" href=\"#pcpp.preprocessor.Action.IgnoreAndPassThrough\">IgnoreAndPassThrough</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.Action.IgnoreAndRemove\" href=\"#pcpp.preprocessor.Action.IgnoreAndRemove\">IgnoreAndRemove</a></code>\n                                </li>\n                            </ul>\n                        </li>\n                        <li>\n                            <h4><code><a title=\"pcpp.preprocessor.Evaluator\" href=\"#pcpp.preprocessor.Evaluator\">Evaluator</a></code>\n                            </h4>\n                            <ul class=\"\">\n                                <li><code><a title=\"pcpp.preprocessor.Evaluator.__init__\" href=\"#pcpp.preprocessor.Evaluator.__init__\">__init__</a></code>\n                                </li>\n                            </ul>\n                        </li>\n                        <li>\n                            <h4><code><a title=\"pcpp.preprocessor.OutputDirective\" 
href=\"#pcpp.preprocessor.OutputDirective\">OutputDirective</a></code>\n                            </h4>\n                            <ul class=\"\">\n                                <li><code><a title=\"pcpp.preprocessor.OutputDirective.__init__\" href=\"#pcpp.preprocessor.OutputDirective.__init__\">__init__</a></code>\n                                </li>\n                            </ul>\n                        </li>\n                        <li>\n                            <h4><code><a title=\"pcpp.preprocessor.Preprocessor\" href=\"#pcpp.preprocessor.Preprocessor\">Preprocessor</a></code>\n                            </h4>\n                            <ul class=\"two-column\">\n                                <li><code><a title=\"pcpp.preprocessor.Preprocessor.add_path\" href=\"#pcpp.preprocessor.Preprocessor.add_path\">add_path</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.Preprocessor.collect_args\" href=\"#pcpp.preprocessor.Preprocessor.collect_args\">collect_args</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.Preprocessor.define\" href=\"#pcpp.preprocessor.Preprocessor.define\">define</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.Preprocessor.evalexpr\" href=\"#pcpp.preprocessor.Preprocessor.evalexpr\">evalexpr</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.Preprocessor.expand_macros\" href=\"#pcpp.preprocessor.Preprocessor.expand_macros\">expand_macros</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.Preprocessor.group_lines\" href=\"#pcpp.preprocessor.Preprocessor.group_lines\">group_lines</a></code>\n                                </li>\n                                
<li><code><a title=\"pcpp.preprocessor.Preprocessor.include\" href=\"#pcpp.preprocessor.Preprocessor.include\">include</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.Preprocessor.macro_expand_args\" href=\"#pcpp.preprocessor.Preprocessor.macro_expand_args\">macro_expand_args</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.Preprocessor.macro_prescan\" href=\"#pcpp.preprocessor.Preprocessor.macro_prescan\">macro_prescan</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.Preprocessor.parse\" href=\"#pcpp.preprocessor.Preprocessor.parse\">parse</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.Preprocessor.parsegen\" href=\"#pcpp.preprocessor.Preprocessor.parsegen\">parsegen</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.Preprocessor.token\" href=\"#pcpp.preprocessor.Preprocessor.token\">token</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.Preprocessor.tokenize\" href=\"#pcpp.preprocessor.Preprocessor.tokenize\">tokenize</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.Preprocessor.tokenstrip\" href=\"#pcpp.preprocessor.Preprocessor.tokenstrip\">tokenstrip</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.Preprocessor.undef\" href=\"#pcpp.preprocessor.Preprocessor.undef\">undef</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.Preprocessor.write\" href=\"#pcpp.preprocessor.Preprocessor.write\">write</a></code>\n            
                    </li>\n                            </ul>\n                        </li>\n                        <li>\n                            <h4><code><a title=\"pcpp.preprocessor.PreprocessorHooks\" href=\"#pcpp.preprocessor.PreprocessorHooks\">PreprocessorHooks</a></code>\n                            </h4>\n                            <ul class=\"\">\n                                <li><code><a title=\"pcpp.preprocessor.PreprocessorHooks.__init__\" href=\"#pcpp.preprocessor.PreprocessorHooks.__init__\">__init__</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.PreprocessorHooks.on_comment\" href=\"#pcpp.preprocessor.PreprocessorHooks.on_comment\">on_comment</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.PreprocessorHooks.on_directive_handle\" href=\"#pcpp.preprocessor.PreprocessorHooks.on_directive_handle\">on_directive_handle</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.PreprocessorHooks.on_directive_unknown\" href=\"#pcpp.preprocessor.PreprocessorHooks.on_directive_unknown\">on_directive_unknown</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.PreprocessorHooks.on_error\" href=\"#pcpp.preprocessor.PreprocessorHooks.on_error\">on_error</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.PreprocessorHooks.on_file_open\" href=\"#pcpp.preprocessor.PreprocessorHooks.on_file_open\">on_file_open</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.PreprocessorHooks.on_include_not_found\" href=\"#pcpp.preprocessor.PreprocessorHooks.on_include_not_found\">on_include_not_found</a></code>\n                                </li>\n      
                          <li><code><a title=\"pcpp.preprocessor.PreprocessorHooks.on_potential_include_guard\" href=\"#pcpp.preprocessor.PreprocessorHooks.on_potential_include_guard\">on_potential_include_guard</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.PreprocessorHooks.on_unknown_macro_function_in_expr\" href=\"#pcpp.preprocessor.PreprocessorHooks.on_unknown_macro_function_in_expr\">on_unknown_macro_function_in_expr</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.PreprocessorHooks.on_unknown_macro_in_defined_expr\" href=\"#pcpp.preprocessor.PreprocessorHooks.on_unknown_macro_in_defined_expr\">on_unknown_macro_in_defined_expr</a></code>\n                                </li>\n                                <li><code><a title=\"pcpp.preprocessor.PreprocessorHooks.on_unknown_macro_in_expr\" href=\"#pcpp.preprocessor.PreprocessorHooks.on_unknown_macro_in_expr\">on_unknown_macro_in_expr</a></code>\n                                </li>\n                            </ul>\n                        </li>\n                    </ul>\n                </li>\n            </ul>\n        </nav>\n    </main>\n    <footer id=\"footer\">\n        <p>Generated by <a href=\"https://pdoc3.github.io/pdoc\"><cite>pdoc</cite> 0.5.3</a>.</p>\n    </footer>\n    <script src=\"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js\"></script>\n    <script>hljs.initHighlightingOnLoad()</script>\n</body>\n\n</html>"
  },
  {
    "path": "pcpp/__init__.py",
    "content": "from .evaluator import Evaluator\nfrom .parser import Action, OutputDirective\nfrom .pcmd import main, version, CmdPreprocessor\nfrom .preprocessor import Preprocessor\n__version__ = version\n"
  },
  {
    "path": "pcpp/evaluator.py",
    "content": "#!/usr/bin/python\n# Python C99 conforming preprocessor expression evaluator\n# (C) 2019-2026 Niall Douglas http://www.nedproductions.biz/\n# Started: Apr 2019\n\nimport sys, os, re, codecs, copy\nif __name__ == '__main__' and __package__ is None:\n    sys.path.append( os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) ) )\nfrom pcpp.parser import STRING_TYPES, yacc, default_lexer, in_production\n\n# The width of signed integer which this evaluator will use\nINTMAXBITS = 64\n\nINTBASETYPE = int\n\n# Precompile the regular expression for correctly expanding unicode escape\n# sequences in Python 2 and 3. See https://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python\n# for more information.\n_expand_escape_sequences_pat = re.compile(r'''\n    ( \\\\U........      # 8-digit hex escapes\n    | \\\\u....          # 4-digit hex escapes\n    | \\\\x..            # 2-digit hex escapes\n    | \\\\[0-7]{1,3}     # Octal escapes\n    | \\\\N\\{[^}]+\\}     # Unicode characters by name\n    | \\\\[\\\\'\"abfnrtv]  # Single-character escapes\n)''', re.UNICODE | re.VERBOSE)\n\nclass Value(INTBASETYPE):\n    \"\"\"A signed or unsigned integer within a preprocessor expression, bounded\n    to within INT_MIN and INT_MAX, or 0 and UINT_MAX. 
Signed overflow is handled\n    like a two's complement CPU, despite being UB, as that's what GCC and clang do.\n    \n    >>> Value(5)\n    Value(5)\n    >>> Value('5L')\n    Value(5)\n    >>> Value('5U')\n    Value(5U)\n    >>> Value('0')\n    Value(0)\n    >>> Value('0U')\n    Value(0U)\n    >>> Value('-1U')\n    Value(18446744073709551615U)\n    >>> Value(5) * Value(2)\n    Value(10)\n    >>> Value(5) + Value('2u')\n    Value(7U)\n    >>> Value(5) * 2\n    Value(10)\n    >>> Value(5) / 2   # Must return integer\n    Value(2)\n    >>> Value(50) % 8\n    Value(2)\n    >>> -Value(5)\n    Value(-5)\n    >>> +Value(-5)\n    Value(-5)\n    >>> ~Value(5)\n    Value(-6)\n    >>> Value(6) & 2\n    Value(2)\n    >>> Value(4) | 2\n    Value(6)\n    >>> Value(6) ^ 2\n    Value(4)\n    >>> Value(2) << 2\n    Value(8)\n    >>> Value(8) >> 2\n    Value(2)\n    >>> Value(9223372036854775808)\n    Value(-9223372036854775808)\n    >>> Value(-9223372036854775809)\n    Value(9223372036854775807)\n    >>> Value(18446744073709551615)\n    Value(-1)\n    >>> Value(False)\n    Value(0)\n    >>> Value(True)\n    Value(1)\n    >>> Value(5) == Value(6)\n    Value(0)\n    >>> Value(5) == Value(5)\n    Value(1)\n    >>> not Value(2)\n    Traceback (most recent call last):\n    ...\n    AssertionError\n    >>> Value(4) and Value(2)\n    Traceback (most recent call last):\n    ...\n    AssertionError\n    >>> Value(5) and not Value(6)\n    Traceback (most recent call last):\n    ...\n    AssertionError\n    >>> Value('0x3f')\n    Value(63)\n    >>> Value('077')\n    Value(63)\n    >>> Value(\"'N'\")\n    Value(78)\n    >>> Value(\"L'N'\")\n    Value(78)\n    >>> Value(\"u8'N'\")\n    Value(78)\n    >>> Value(\"u'N'\")\n    Value(78)\n    >>> Value(\"U'N'\")\n    Value(78)\n    >>> Value(\"u'猫'\")\n    Value(29483)\n    >>> Value(\"U'猫'\")\n    Value(29483)\n    >>> Value(\"U'🍌'\")\n    Value(127820)\n    >>> Value(\"'\\\\n'\")\n    Value(10)\n    >>> Value(\"'\\\\\\\\n'\")\n    Value(10)\n   
 >>> Value(\"'\\\\\\\\'\")\n    Value(92)\n    >>> Value(\"'\\\\'\")\n    Traceback (most recent call last):\n    ...\n    SyntaxError: Empty character escape sequence\n    \"\"\"\n    INT_MIN = -(1 << (INTMAXBITS - 1))\n    INT_MAX = (1 << (INTMAXBITS - 1)) - 1\n    INT_MASK = (1 << INTMAXBITS) - 1\n    UINT_MIN = 0\n    UINT_MAX = (1 << INTMAXBITS) - 1\n    @classmethod\n    def __sclamp(cls, value):\n        value = INTBASETYPE(value)\n        return ((value - cls.INT_MIN) & cls.INT_MASK) + cls.INT_MIN\n    @classmethod\n    def __uclamp(cls, value):\n        value = INTBASETYPE(value)\n        return value & cls.UINT_MAX\n    def __new__(cls, value, unsigned = False, exception = None):\n        if isinstance(value, Value):\n            unsigned = value.unsigned\n            exception = value.exception\n        elif isinstance(value, INTBASETYPE) or isinstance(value, int) or isinstance(value, float):\n            value = cls.__uclamp(value) if unsigned else cls.__sclamp(value)\n        elif isinstance(value, STRING_TYPES):\n            if (value.startswith(\"L'\") or \n                value.startswith(\"u8'\") or \n                value.startswith(\"u'\") or \n                value.startswith(\"U'\") or \n                value[0] == \"'\") and value[-1] == \"'\":\n                startidx = 3 if value.startswith(\"u8\") else 2 if value[0] != \"'\" else 1\n                #print(\"1. ***\", value, file = sys.stderr)\n                value = value[startidx:-1].replace(\"\\\\\\n\", '')\n                if len(value) == 0:\n                    raise SyntaxError('Empty character escape sequence')\n                #print(\"2. ***\", value, file = sys.stderr)\n                value = _expand_escape_sequences_pat.sub(lambda x: codecs.decode(x.group(0), 'unicode-escape'), value)\n                #print(\"3. ***\", value, file = sys.stderr)\n                x = INTBASETYPE(ord(value))\n                #print(\"4. 
***\", x, file = sys.stderr)\n            elif value.startswith('0x') or value.startswith('0X'):\n                # Strip any terminators\n                while not ((value[-1] >= '0' and value[-1] <= '9') or (value[-1] >= 'a' and value[-1] <= 'f') or (value[-1] >= 'A' and value[-1] <= 'F')):\n                    if value[-1] == 'u' or value[-1] == 'U':\n                        unsigned = True\n                    value = value[:-1]\n                x = INTBASETYPE(value, base = 16)\n            elif value.startswith('0'):\n                # Strip any terminators\n                while not (value[-1] >= '0' and value[-1] <= '7'):\n                    if value[-1] == 'u' or value[-1] == 'U':\n                        unsigned = True\n                    value = value[:-1]\n                x = INTBASETYPE(value, base = 8)\n            else:\n                # Strip any terminators\n                while not (value[-1] >= '0' and value[-1] <= '9'):\n                    if value[-1] == 'u' or value[-1] == 'U':\n                        unsigned = True\n                    value = value[:-1]\n                x = INTBASETYPE(value)\n            value = cls.__uclamp(x) if unsigned else cls.__sclamp(x)\n            #assert x == value\n        else:\n            print('Unknown value type: %s' % repr(type(value)), file = sys.stderr)\n            assert False  # Input is an unrecognised type\n        inst = super(Value, cls).__new__(cls, value)\n        inst.unsigned = unsigned\n        inst.exception = exception\n        return inst\n    def value(self):\n        if self.exception is not None:\n            raise self.exception\n        return INTBASETYPE(self)\n    def __add__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) + self.__uclamp(other), True) if (self.unsigned or other.unsigned) else 
Value(super(Value, self).__add__(other))\n    def __sub__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) - self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__sub__(other))\n    def __mul__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) * self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__mul__(other))\n    def __div__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) / self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__div__(other))\n    def __truediv__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) / self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__truediv__(other))\n    def __mod__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) % self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__mod__(other))\n    def __neg__(self):\n        if self.exception is not None:\n            return self\n        return Value(super(Value, self).__neg__(), self.unsigned)\n    def __invert__(self):\n        if self.exception 
is not None:\n            return self\n        return Value(super(Value, self).__invert__(), self.unsigned)\n    def __and__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) & self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__and__(other))\n    def __or__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) | self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__or__(other))\n    def __pos__(self):\n        if self.exception is not None:\n            return self\n        return Value(super(Value, self).__pos__())\n    def __pow__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) ** self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__pow__(other))\n    def __lshift__(self, other):\n        if self.exception is not None:\n            return self\n        # Ignore other signedness\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) << self.__uclamp(other), True) if (self.unsigned) else Value(super(Value, self).__lshift__(other))\n    def __rshift__(self, other):\n        if self.exception is not None:\n            return self\n        # Ignore other signedness\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) >> self.__uclamp(other), True) if (self.unsigned) else 
Value(super(Value, self).__rshift__(other))\n    def __xor__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) ^ self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(super(Value, self).__xor__(other))\n    def __repr__(self):\n        if self.exception is not None:\n            return \"Exception(%s)\" % repr(self.exception)\n        elif self.unsigned:\n            return \"Value(%dU)\" % INTBASETYPE(self)\n        else:\n            return \"Value(%d)\" % INTBASETYPE(self)\n    def __bool__(self):\n        assert False  # Do not use Python logical operations\n    def __cmp__(self, other):\n        assert False\n    def __lt__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) < self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(self.__sclamp(self) < self.__sclamp(other), False)\n    def __le__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) <= self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(self.__sclamp(self) <= self.__sclamp(other), False)\n    def __eq__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) == self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(self.__sclamp(self) == self.__sclamp(other), False)\n    def __ne__(self, other):\n        if self.exception is not None:\n            return 
self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) != self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(self.__sclamp(self) != self.__sclamp(other), False)\n    def __ge__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) >= self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(self.__sclamp(self) >= self.__sclamp(other), False)\n    def __gt__(self, other):\n        if self.exception is not None:\n            return self\n        other = Value(other)\n        if other.exception is not None:\n            return other\n        return Value(self.__uclamp(self) > self.__uclamp(other), True) if (self.unsigned or other.unsigned) else Value(self.__sclamp(self) > self.__sclamp(other), False)\n\n        \n# PLY yacc specification\n# Valid C preprocessor expression items:\n#   - Integer constants\n#   - Character constants\n#   - Addition, subtraction, multiplication, division, bitwise and-or-xor, shifts,\n#     comparisons, logical and-or-not\n#   - defined()\n#\n# The C preprocessor does not support:\n#   - assignment\n#   - increment and decrement\n#   - array indexing, indirection\n#   - casting\n#   - sizeof, alignof\n\n# The subset of tokens from Preprocessor used in preprocessor expressions\ntokens = (\n   'CPP_ID', 'PP_NUMBER', 'CPP_CHAR', 'CPP_STRING',\n   'CPP_PLUS', 'CPP_MINUS', 'CPP_STAR', 'CPP_FSLASH', 'CPP_PERCENT', 'CPP_BAR',\n   'CPP_AMPERSAND', 'CPP_TILDE', 'CPP_HAT', 'CPP_LESS', 'CPP_GREATER', 'CPP_EXCLAMATION',\n   'CPP_QUESTION', 'CPP_LPAREN', 'CPP_RPAREN',\n   'CPP_COMMA', 'CPP_COLON',\n\n   'CPP_LSHIFT', 'CPP_LESSEQUAL', 'CPP_RSHIFT',\n   'CPP_GREATEREQUAL', 'CPP_LOGICALOR', 'CPP_LOGICALAND', 'CPP_EQUALITY',\n   'CPP_INEQUALITY'\n)\n# 'CPP_WS', 
'CPP_EQUAL',  'CPP_BSLASH', 'CPP_SQUOTE',\n\nprecedence = (\n    ('left', 'CPP_COMMA'),                                                     # 15\n                                                                               # 14 (assignments, unused)\n    ('left', 'CPP_QUESTION', 'CPP_COLON'),                                     # 13\n    ('left', 'CPP_LOGICALOR'),                                                 # 12\n    ('left', 'CPP_LOGICALAND'),                                                # 11\n    ('left', 'CPP_BAR'),                                                       # 10\n    ('left', 'CPP_HAT'),                                                       # 9\n    ('left', 'CPP_AMPERSAND'),                                                 # 8\n    ('left', 'CPP_EQUALITY', 'CPP_INEQUALITY'),                                # 7\n    ('left', 'CPP_LESS', 'CPP_LESSEQUAL', 'CPP_GREATER', 'CPP_GREATEREQUAL'),  # 6\n    ('left', 'CPP_LSHIFT', 'CPP_RSHIFT'),                                      # 5\n    ('left', 'CPP_PLUS', 'CPP_MINUS'),                                         # 4\n    ('left', 'CPP_STAR', 'CPP_FSLASH', 'CPP_PERCENT'),                         # 3\n    ('right', 'UPLUS', 'UMINUS', 'CPP_EXCLAMATION', 'CPP_TILDE'),              # 2\n                                                                               # 1 (unused in the C preprocessor)\n)\n\ndef p_error(p):\n    if p:\n        raise SyntaxError(\"around token '%s' type %s\" % (p.value, p.type))\n    else:\n        raise SyntaxError(\"at EOF\")\n\ndef p_expression_number(p):\n    'expression : PP_NUMBER'\n    try:\n        p[0] = Value(p[1])\n    except:\n        p[0] = p[1]\n\n\ndef p_expression_character(p):\n    'expression : CPP_CHAR'\n    p[0] = Value(p[1])\n\ndef p_expression_string(p):\n    \"\"\"\n    expression : CPP_STRING\n              | CPP_LESS expression CPP_GREATER\n    \"\"\"\n    p[0] = p[1]\n\ndef p_expression_group(t):\n    'expression : CPP_LPAREN expression CPP_RPAREN'\n    
t[0] = t[2]\n\ndef p_expression_uplus(p):\n    'expression : CPP_PLUS expression %prec UPLUS'\n    p[0] = +Value(p[2])\n\ndef p_expression_uminus(p):\n    'expression : CPP_MINUS expression %prec UMINUS'\n    p[0] = -Value(p[2])\n\ndef p_expression_unop(p):\n    \"\"\"\n    expression : CPP_EXCLAMATION expression\n              | CPP_TILDE expression\n    \"\"\"\n    try:\n        if p[1] == '!':\n            p[0] = Value(0) if (Value(p[2]).value()!=0) else Value(1)\n        elif p[1] == '~':\n            p[0] = ~Value(p[2])\n    except Exception as e:\n        p[0] = Value(0, exception = e)\n\ndef p_expression_binop(p):\n    \"\"\"\n    expression : expression CPP_STAR expression\n              | expression CPP_FSLASH expression\n              | expression CPP_PERCENT expression\n              | expression CPP_PLUS expression\n              | expression CPP_MINUS expression\n              | expression CPP_LSHIFT expression\n              | expression CPP_RSHIFT expression\n              | expression CPP_LESS expression\n              | expression CPP_LESSEQUAL expression\n              | expression CPP_GREATER expression\n              | expression CPP_GREATEREQUAL expression\n              | expression CPP_EQUALITY expression\n              | expression CPP_INEQUALITY expression\n              | expression CPP_AMPERSAND expression\n              | expression CPP_HAT expression\n              | expression CPP_BAR expression\n              | expression CPP_LOGICALAND expression\n              | expression CPP_LOGICALOR expression\n              | expression CPP_COMMA expression\n    \"\"\"\n    # print [repr(p[i]) for i in range(0,4)]\n    try:\n        if p[2] == '*':\n            p[0] = Value(p[1]) * Value(p[3])\n        elif p[2] == '/':\n            p[0] = Value(p[1]) / Value(p[3])\n        elif p[2] == '%':\n            p[0] = Value(p[1]) % Value(p[3])\n        elif p[2] == '+':\n            p[0] = Value(p[1]) + Value(p[3])\n        elif p[2] == '-':\n         
   p[0] = Value(p[1]) - Value(p[3])\n        elif p[2] == '<<':\n            p[0] = Value(p[1]) << Value(p[3])\n        elif p[2] == '>>':\n            p[0] = Value(p[1]) >> Value(p[3])\n        elif p[2] == '<':\n            p[0] = Value(p[1]) < Value(p[3])\n        elif p[2] == '<=':\n            p[0] = Value(p[1]) <= Value(p[3])\n        elif p[2] == '>':\n            p[0] = Value(p[1]) > Value(p[3])\n        elif p[2] == '>=':\n            p[0] = Value(p[1]) >= Value(p[3])\n        elif p[2] == '==':\n            p[0] = Value(p[1]) == Value(p[3])\n        elif p[2] == '!=':\n            p[0] = Value(p[1]) != Value(p[3])\n        elif p[2] == '&':\n            p[0] = Value(p[1]) & Value(p[3])\n        elif p[2] == '^':\n            p[0] = Value(p[1]) ^ Value(p[3])\n        elif p[2] == '|':\n            p[0] = Value(p[1]) | Value(p[3])\n        elif p[2] == '&&':\n            p[0] = Value(1) if (Value(p[1]).value()!=0 and Value(p[3]).value()!=0) else Value(0)\n        elif p[2] == '||':\n            p[0] = Value(1) if (Value(p[1]).value()!=0 or Value(p[3]).value()!=0) else Value(0)\n        elif p[2] == ',':\n            p[0] = Value(p[3])\n    except Exception as e:\n        p[0] = Value(0, exception = e)\n\ndef p_expression_conditional(p):\n    'expression : expression CPP_QUESTION expression CPP_COLON expression'\n    try:\n        # Output type must cast up to unsigned if either input is unsigned\n        p[0] = Value(p[3]) if (Value(p[1]).value()!=0) else Value(p[5])\n        try:\n            p[0] = Value(p[0].value(), unsigned = Value(p[3]).unsigned or Value(p[5]).unsigned)\n        except:\n            pass\n    except Exception as e:\n        p[0] = Value(0, exception = e)\n\ndef p_expression_function_call(p):\n    \"expression : CPP_ID CPP_LPAREN expression CPP_RPAREN\"\n    try:\n        p.lexer.on_function_call(p)\n    except Exception as e:\n        p[0] = Value(0, exception = e)\n\ndef p_expression_identifier(p):\n    \"expression : CPP_ID\"\n    
try:\n        p.lexer.on_identifier(p)\n    except Exception as e:\n        p[0] = Value(0, exception = e)\n\n\nclass Evaluator(object):\n    \"\"\"Evaluator of #if C preprocessor expressions.\n    \n    >>> e = Evaluator()\n    >>> e('5')\n    Value(5)\n    >>> e('5+6')\n    Value(11)\n    >>> e('5+6*2')\n    Value(17)\n    >>> e('5/2+6*2')\n    Value(14)\n    >>> e('5 < 6 <= 7')\n    Value(1)\n    >>> e('5 < 6 && 8 > 7')\n    Value(1)\n    >>> e('18446744073709551615 == -1')\n    Value(1)\n    >>> e('-9223372036854775809 == 9223372036854775807')\n    Value(1)\n    >>> e('-1 < 0U')\n    Value(0U)\n    >>> e('(( 0L && 0) || (!0L && !0 ))')\n    Value(1)\n    >>> e('(1)?2:3')\n    Value(2)\n    >>> e('(1 ? -1 : 0) <= 0')\n    Value(1)\n    >>> e('(1 ? -1 : 0U)')       # Output type of ? must be common between both choices\n    Value(18446744073709551615U)\n    >>> e('(1 ? -1 : 0U) <= 0')\n    Value(0U)\n    >>> e('1 && 10 / 0')         # doctest: +ELLIPSIS\n    Exception(ZeroDivisionError(...\n    >>> e('0 && 10 / 0')         # && must shortcut\n    Value(0)\n    >>> e('1 ? 10 / 0 : 0')      # doctest: +ELLIPSIS\n    Exception(ZeroDivisionError(...\n    >>> e('0 ? 10 / 0 : 0')      # ? 
must shortcut\n    Value(0)\n    >>> e('(3 ^ 5) != 6 || (3 | 5) != 7 || (3 & 5) != 1')\n    Value(0)\n    >>> e('1 << 2 != 4 || 8 >> 1 != 4')\n    Value(0)\n    >>> e('(2 || 3) != 1 || (2 && 3) != 1 || (0 || 4) != 1 || (0 && 5) != 0')\n    Value(0)\n    >>> e('-1 << 3U > 0')\n    Value(0)\n    >>> e(\"'N' == 78\")\n    Value(1)\n    >>> e('0x3f == 63')\n    Value(1)\n    >>> e(\"'\\\\\\\\n'\")\n    Value(10)\n    >>> e(\"'\\\\\\\\\\\\\\\\'\")\n    Value(92)\n    >>> e(\"'\\\\\\\\n' == 0xA\")\n    Value(1)\n    >>> e(\"'\\\\\\\\\\\\\\\\' == 0x5c\")\n    Value(1)\n    >>> e(\"L'\\\\\\\\0' == 0\")\n    Value(1)\n    >>> e('12 == 12')\n    Value(1)\n    >>> e('12L == 12')\n    Value(1)\n    >>> e('-1 >= 0U')\n    Value(1U)\n    >>> e('(1<<2) == 4')\n    Value(1)\n    >>> e('(-!+!9) == -1')\n    Value(1)\n    >>> e('(2 || 3) == 1')\n    Value(1)\n    >>> e('1L * 3 != 3')\n    Value(0)\n    >>> e('(!1L != 0) || (-1L != -1)')\n    Value(0)\n    >>> e('0177777 == 65535')\n    Value(1)\n    >>> e('0Xffff != 65535 || 0XFfFf == 65535')\n    Value(1)\n    >>> e('0L != 0 || 0l != 0')\n    Value(0)\n    >>> e('1U != 1 || 1u == 1')\n    Value(1)\n    >>> e('0 <= -1')\n    Value(0)\n    >>> e('1 << 2 != 4 || 8 >> 1 == 4')\n    Value(1)\n    >>> e('(3 ^ 5) == 6')\n    Value(1)\n    >>> e('(3 | 5) == 7')\n    Value(1)\n    >>> e('(3 & 5) == 1')\n    Value(1)\n    >>> e('(3 ^ 5) != 6 || (3 | 5) != 7 || (3 & 5) != 1')\n    Value(0)\n    >>> e('(0 ? 1 : 2) != 2')\n    Value(0)\n    >>> e('-1 << 3U > 0')\n    Value(0)\n    >>> e('0 && 10 / 0')\n    Value(0)\n    >>> e('not_defined && 10 / not_defined')  # doctest: +ELLIPSIS\n    Exception(SyntaxError('Unknown identifier not_defined'...\n    >>> e('0 && 10 / 0 > 1')\n    Value(0)\n    >>> e('(0) ? 
10 / 0 : 0')\n    Value(0)\n    >>> e('0 == 0 || 10 / 0 > 1')\n    Value(1)\n    >>> e('(15 >> 2 >> 1 != 1) || (3 << 2 << 1 != 24)')\n    Value(0)\n    >>> e('(1 | 2) == 3 && 4 != 5 || 0')\n    Value(1)\n    >>> e('1  >  0')\n    Value(1)\n    >>> e(\"'\\123' != 83\")\n    Value(0)\n    >>> e(\"'\\x1b' != '\\033'\")\n    Value(0)\n    >>> e('0 + (1 - (2 + (3 - (4 + (5 - (6 + (7 - (8 + (9 - (10 + (11 - (12 +          (13 - (14 + (15 - (16 + (17 - (18 + (19 - (20 + (21 - (22 + (23 -           (24 + (25 - (26 + (27 - (28 + (29 - (30 + (31 - (32 + 0))))))))))           )))))))))))))))))))))) == 0')\n    Value(1)\n    >>> e('test_function(X)', functions={'test_function':lambda x: 55})\n    Value(55)\n    >>> e('test_identifier', identifiers={'test_identifier':11})\n    Value(11)\n    >>> e('defined(X)', functions={'defined':lambda x: 55})\n    Value(55)\n    >>> e('defined(X)')  # doctest: +ELLIPSIS\n    Exception(SyntaxError('Unknown function defined'...\n    >>> e('__has_include(\"variant\")')  # doctest: +ELLIPSIS\n    Exception(SyntaxError('Unknown function __has_include'...\n    >>> e('__has_include(<variant>)')  # doctest: +ELLIPSIS\n    Exception(SyntaxError('Unknown function __has_include'...\n    >>> e('5  // comment')\n    Value(5)\n    >>> e('5  /* comment */')\n    Value(5)\n    >>> e('5  /* comment // more */')\n    Value(5)\n    >>> e('5  // /* comment */')\n    Value(5)\n    \"\"\"\n#    >>> e('defined X', functions={'defined':lambda x: 55})\n#    Value(55)\n\n    def __init__(self, lexer = None):\n        self.lexer = lexer if lexer is not None else default_lexer()\n        self.parser = yacc.yacc(optimize=in_production,debug=not in_production,write_tables=not in_production)\n\n    class __lexer(object):\n\n        def __init__(self, functions, identifiers):\n            self.__toks = []\n            self.__functions = functions\n            self.__identifiers = identifiers\n\n        def input(self, toks):\n            self.__toks = [tok for tok in toks 
if tok.type != 'CPP_WS' and tok.type != 'CPP_LINECONT' and tok.type != 'CPP_COMMENT1' and tok.type != 'CPP_COMMENT2']\n            self.__idx = 0\n\n        def token(self):\n            if self.__idx >= len(self.__toks):\n                return None\n            self.__idx = self.__idx + 1\n            return self.__toks[self.__idx - 1]\n\n        def on_function_call(self, p):\n            if p[1] not in self.__functions:\n                raise SyntaxError('Unknown function %s' % p[1])\n            p[0] = Value(self.__functions[p[1]](p[3]))\n\n        def on_identifier(self, p):\n            if p[1] not in self.__identifiers:\n                raise SyntaxError('Unknown identifier %s' % p[1])\n            p[0] = Value(self.__identifiers[p[1]])\n            \n    def __call__(self, input, functions = {}, identifiers = {}):\n        \"\"\"Execute a fully macro expanded set of tokens representing an expression,\n        returning the result of the evaluation.\n        \"\"\"\n        if not isinstance(input,list):\n            self.lexer.input(input)\n            input = []\n            while True:\n                tok = self.lexer.token()\n                if not tok:\n                    break\n                input.append(tok)\n        return self.parser.parse(input, lexer = self.__lexer(functions, identifiers))\n\n\nif __name__ == \"__main__\":\n    import doctest\n    doctest.testmod()\n\n"
  },
  {
    "path": "pcpp/lextab.py",
    "content": "# lextab.py. This file automatically created by PLY (version 3.11). Don't edit!\n_tabversion   = '3.10'\n_lextokens    = set(('CPP_AMPERSAND', 'CPP_ANDEQUAL', 'CPP_BAR', 'CPP_BSLASH', 'CPP_CHAR', 'CPP_COLON', 'CPP_COMMA', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_DEREFERENCE', 'CPP_DIVIDEEQUAL', 'CPP_DOT', 'CPP_DPOUND', 'CPP_DQUOTE', 'CPP_EQUAL', 'CPP_EQUALITY', 'CPP_EXCLAMATION', 'CPP_FSLASH', 'CPP_GREATER', 'CPP_GREATEREQUAL', 'CPP_HAT', 'CPP_ID', 'CPP_INEQUALITY', 'CPP_LBRACKET', 'CPP_LCURLY', 'CPP_LESS', 'CPP_LESSEQUAL', 'CPP_LINECONT', 'CPP_LOGICALAND', 'CPP_LOGICALOR', 'CPP_LPAREN', 'CPP_LSHIFT', 'CPP_LSHIFTEQUAL', 'CPP_MINUS', 'CPP_MINUSEQUAL', 'CPP_MINUSMINUS', 'CPP_MULTIPLYEQUAL', 'CPP_OREQUAL', 'CPP_PERCENT', 'CPP_PERCENTEQUAL', 'CPP_PLUS', 'CPP_PLUSEQUAL', 'CPP_PLUSPLUS', 'CPP_POUND', 'CPP_QUESTION', 'CPP_RBRACKET', 'CPP_RCURLY', 'CPP_RPAREN', 'CPP_RSHIFT', 'CPP_RSHIFTEQUAL', 'CPP_SEMICOLON', 'CPP_SQUOTE', 'CPP_STAR', 'CPP_STRING', 'CPP_TILDE', 'CPP_WS', 'CPP_XOREQUAL', 'PP_NUMBER'))\n_lexreflags   = 64\n_lexliterals  = '+-*/%|&~^<>=!?()[]{}.,;:\\\\\\'\"'\n_lexstateinfo = {'INITIAL': 'inclusive'}\n_lexstatere   = {'INITIAL': [('(?P<t_CPP_WS>([ \\\\t]+|\\\\n))|(?P<t_CPP_LINECONT>\\\\\\\\[ 
\\\\t]*\\\\n)|(?P<t_PP_NUMBER>\\\\.?\\\\d(?:\\\\.|[\\\\w_]|\\'[\\\\w_]|[eEpP][-+])*)|(?P<t_CPP_STRING>\\\\\"([^\\\\\\\\\\\\n]|(\\\\\\\\(.|\\\\n)))*?\\\\\")|(?P<t_CPP_CHAR>(u8|u|U|L)?\\\\\\'([^\\\\\\\\\\\\n]|(\\\\\\\\(.|\\\\n)))*?\\\\\\')|(?P<t_CPP_COMMENT1>(/\\\\*(.|\\\\n)*?\\\\*/))|(?P<t_CPP_COMMENT2>(//[^\\\\n]*))|(?P<t_CPP_ID>[A-Za-z_][\\\\w_]*)|(?P<t_CPP_DPOUND>\\\\#\\\\#)|(?P<t_CPP_LOGICALOR>\\\\|\\\\|)|(?P<t_CPP_PLUSPLUS>\\\\+\\\\+)|(?P<t_CPP_OREQUAL>\\\\|=)|(?P<t_CPP_MULTIPLYEQUAL>\\\\*=)|(?P<t_CPP_PLUSEQUAL>\\\\+=)|(?P<t_CPP_LSHIFTEQUAL><<=)|(?P<t_CPP_RSHIFTEQUAL>>>=)|(?P<t_CPP_POUND>\\\\#)|(?P<t_CPP_PLUS>\\\\+)|(?P<t_CPP_STAR>\\\\*)|(?P<t_CPP_BAR>\\\\|)|(?P<t_CPP_HAT>\\\\^)|(?P<t_CPP_QUESTION>\\\\?)|(?P<t_CPP_LPAREN>\\\\()|(?P<t_CPP_RPAREN>\\\\))|(?P<t_CPP_LBRACKET>\\\\[)|(?P<t_CPP_RBRACKET>\\\\])|(?P<t_CPP_DOT>\\\\.)|(?P<t_CPP_BSLASH>\\\\\\\\)|(?P<t_CPP_DEREFERENCE>->)|(?P<t_CPP_MINUSEQUAL>-=)|(?P<t_CPP_MINUSMINUS>--)|(?P<t_CPP_LSHIFT><<)|(?P<t_CPP_LESSEQUAL><=)|(?P<t_CPP_RSHIFT>>>)|(?P<t_CPP_GREATEREQUAL>>=)|(?P<t_CPP_LOGICALAND>&&)|(?P<t_CPP_ANDEQUAL>&=)|(?P<t_CPP_EQUALITY>==)|(?P<t_CPP_INEQUALITY>!=)|(?P<t_CPP_XOREQUAL>^=)|(?P<t_CPP_DIVIDEEQUAL>/=)|(?P<t_CPP_PERCENTEQUAL>%=)|(?P<t_CPP_MINUS>-)|(?P<t_CPP_FSLASH>/)|(?P<t_CPP_PERCENT>%)|(?P<t_CPP_AMPERSAND>&)|(?P<t_CPP_TILDE>~)|(?P<t_CPP_LESS><)|(?P<t_CPP_GREATER>>)|(?P<t_CPP_EQUAL>=)|(?P<t_CPP_EXCLAMATION>!)|(?P<t_CPP_LCURLY>{)|(?P<t_CPP_RCURLY>})|(?P<t_CPP_COMMA>,)|(?P<t_CPP_SEMICOLON>;)|(?P<t_CPP_COLON>:)|(?P<t_CPP_SQUOTE>\\')|(?P<t_CPP_DQUOTE>\")', [None, ('t_CPP_WS', 'CPP_WS'), None, ('t_CPP_LINECONT', 'CPP_LINECONT'), ('t_PP_NUMBER', 'PP_NUMBER'), ('t_CPP_STRING', 'CPP_STRING'), None, None, None, ('t_CPP_CHAR', 'CPP_CHAR'), None, None, None, None, ('t_CPP_COMMENT1', 'CPP_COMMENT1'), None, None, ('t_CPP_COMMENT2', 'CPP_COMMENT2'), None, (None, 'CPP_ID'), (None, 'CPP_DPOUND'), (None, 'CPP_LOGICALOR'), (None, 'CPP_PLUSPLUS'), (None, 'CPP_OREQUAL'), (None, 'CPP_MULTIPLYEQUAL'), (None, 'CPP_PLUSEQUAL'), 
(None, 'CPP_LSHIFTEQUAL'), (None, 'CPP_RSHIFTEQUAL'), (None, 'CPP_POUND'), (None, 'CPP_PLUS'), (None, 'CPP_STAR'), (None, 'CPP_BAR'), (None, 'CPP_HAT'), (None, 'CPP_QUESTION'), (None, 'CPP_LPAREN'), (None, 'CPP_RPAREN'), (None, 'CPP_LBRACKET'), (None, 'CPP_RBRACKET'), (None, 'CPP_DOT'), (None, 'CPP_BSLASH'), (None, 'CPP_DEREFERENCE'), (None, 'CPP_MINUSEQUAL'), (None, 'CPP_MINUSMINUS'), (None, 'CPP_LSHIFT'), (None, 'CPP_LESSEQUAL'), (None, 'CPP_RSHIFT'), (None, 'CPP_GREATEREQUAL'), (None, 'CPP_LOGICALAND'), (None, 'CPP_ANDEQUAL'), (None, 'CPP_EQUALITY'), (None, 'CPP_INEQUALITY'), (None, 'CPP_XOREQUAL'), (None, 'CPP_DIVIDEEQUAL'), (None, 'CPP_PERCENTEQUAL'), (None, 'CPP_MINUS'), (None, 'CPP_FSLASH'), (None, 'CPP_PERCENT'), (None, 'CPP_AMPERSAND'), (None, 'CPP_TILDE'), (None, 'CPP_LESS'), (None, 'CPP_GREATER'), (None, 'CPP_EQUAL'), (None, 'CPP_EXCLAMATION'), (None, 'CPP_LCURLY'), (None, 'CPP_RCURLY'), (None, 'CPP_COMMA'), (None, 'CPP_SEMICOLON'), (None, 'CPP_COLON'), (None, 'CPP_SQUOTE'), (None, 'CPP_DQUOTE')])]}\n_lexstateignore = {'INITIAL': ''}\n_lexstateerrorf = {'INITIAL': 't_error'}\n_lexstateeoff = {}\n"
  },
  {
    "path": "pcpp/parser.py",
    "content": "#!/usr/bin/python\n# Python C99 conforming preprocessor parser config\n# (C) 2017-2026 Niall Douglas http://www.nedproductions.biz/\n# and (C) 2007-2017 David Beazley http://www.dabeaz.com/\n# Started: Feb 2017\n#\n# This C preprocessor was originally written by David Beazley and the\n# original can be found at https://github.com/dabeaz/ply/blob/master/ply/cpp.py\n# This edition substantially improves on standards conforming output,\n# getting quite close to what clang or GCC outputs.\n\nimport sys, re, os\n\nin_production = 1  # Set to 0 if editing pcpp implementation!\n\nSTRING_TYPES = str\n\n# -----------------------------------------------------------------------------\n# Default preprocessor lexer definitions.   These tokens are enough to get\n# a basic preprocessor working.   Other modules may import these if they want\n# -----------------------------------------------------------------------------\n\ntokens = (\n   'CPP_ID', 'PP_NUMBER', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_LINECONT', 'CPP_COMMENT1', 'CPP_COMMENT2',\n   'CPP_POUND','CPP_DPOUND', 'CPP_PLUS', 'CPP_MINUS', 'CPP_STAR', 'CPP_FSLASH', 'CPP_PERCENT', 'CPP_BAR',\n   'CPP_AMPERSAND', 'CPP_TILDE', 'CPP_HAT', 'CPP_LESS', 'CPP_GREATER', 'CPP_EQUAL', 'CPP_EXCLAMATION',\n   'CPP_QUESTION', 'CPP_LPAREN', 'CPP_RPAREN', 'CPP_LBRACKET', 'CPP_RBRACKET', 'CPP_LCURLY', 'CPP_RCURLY',\n   'CPP_DOT', 'CPP_COMMA', 'CPP_SEMICOLON', 'CPP_COLON', 'CPP_BSLASH', 'CPP_SQUOTE', 'CPP_DQUOTE',\n\n   'CPP_DEREFERENCE', 'CPP_MINUSEQUAL', 'CPP_MINUSMINUS', 'CPP_LSHIFT', 'CPP_LESSEQUAL', 'CPP_RSHIFT',\n   'CPP_GREATEREQUAL', 'CPP_LOGICALOR', 'CPP_OREQUAL', 'CPP_LOGICALAND', 'CPP_ANDEQUAL', 'CPP_EQUALITY',\n   'CPP_INEQUALITY', 'CPP_XOREQUAL', 'CPP_MULTIPLYEQUAL', 'CPP_DIVIDEEQUAL', 'CPP_PLUSEQUAL', 'CPP_PLUSPLUS',\n   'CPP_PERCENTEQUAL', 'CPP_LSHIFTEQUAL', 'CPP_RSHIFTEQUAL'\n)\n\nliterals = \"+-*/%|&~^<>=!?()[]{}.,;:\\\\\\'\\\"\"\n\n# Whitespace, but don't match past the end of a line\ndef t_CPP_WS(t):\n    
r'([ \\t]+|\\n)'\n    t.lexer.lineno += t.value.count(\"\\n\")\n    return t\n\n# Line continuation, accept whitespace between the backslash and new line\ndef t_CPP_LINECONT(t):\n    r'\\\\[ \\t]*\\n'\n    t.value = t.value[1:-1]\n    t.lexer.lineno += 1\n    return t\n_string_literal_linecont_pat = re.compile(r'\\\\[ \\t]*\\n')\n\nt_CPP_POUND = r'\\#'\nt_CPP_DPOUND = r'\\#\\#'\nt_CPP_PLUS = r'\\+'\nt_CPP_MINUS = r'-'\nt_CPP_STAR = r'\\*'\nt_CPP_FSLASH = r'/'\nt_CPP_PERCENT = r'%'\nt_CPP_BAR = r'\\|'\nt_CPP_AMPERSAND = r'&'\nt_CPP_TILDE = r'~'\nt_CPP_HAT = r'\\^'\nt_CPP_LESS = r'<'\nt_CPP_GREATER = r'>'\nt_CPP_EQUAL = r'='\nt_CPP_EXCLAMATION = r'!'\nt_CPP_QUESTION = r'\\?'\nt_CPP_LPAREN = r'\\('\nt_CPP_RPAREN = r'\\)'\nt_CPP_LBRACKET = r'\\['\nt_CPP_RBRACKET = r'\\]'\nt_CPP_LCURLY = r'{'\nt_CPP_RCURLY = r'}'\nt_CPP_DOT = r'\\.'\nt_CPP_COMMA = r','\nt_CPP_SEMICOLON = r';'\nt_CPP_COLON = r':'\nt_CPP_BSLASH = r'\\\\'\nt_CPP_SQUOTE = r\"'\"\nt_CPP_DQUOTE = r'\"'\n\nt_CPP_DEREFERENCE = r'->'\nt_CPP_MINUSEQUAL = r'-='\nt_CPP_MINUSMINUS = r'--'\nt_CPP_LSHIFT = r'<<'\nt_CPP_LESSEQUAL = r'<='\nt_CPP_RSHIFT = r'>>'\nt_CPP_GREATEREQUAL = r'>='\nt_CPP_LOGICALOR = r'\\|\\|'\nt_CPP_OREQUAL = r'\\|='\nt_CPP_LOGICALAND = r'&&'\nt_CPP_ANDEQUAL = r'&='\nt_CPP_EQUALITY = r'=='\nt_CPP_INEQUALITY = r'!='\nt_CPP_XOREQUAL = r'^='\nt_CPP_MULTIPLYEQUAL = r'\\*='\nt_CPP_DIVIDEEQUAL = r'/='\nt_CPP_PLUSEQUAL = r'\\+='\nt_CPP_PLUSPLUS = r'\\+\\+'\nt_CPP_PERCENTEQUAL = r'%='\nt_CPP_LSHIFTEQUAL = r'<<='\nt_CPP_RSHIFTEQUAL = r'>>='\n\n\n# Identifier\nt_CPP_ID = r'[A-Za-z_][\\w_]*'\n\n# Preprocessor number\ndef PP_NUMBER(t):\n    r\"\\.?\\d(?:\\.|[\\w_]|'[\\w_]|[eEpP][-+])*\"\n    return t\n\nt_PP_NUMBER = PP_NUMBER\n\n# String literal\ndef t_CPP_STRING(t):\n    r'\\\"([^\\\\\\n]|(\\\\(.|\\n)))*?\\\"'\n    t.value, subs_made = _string_literal_linecont_pat.subn('', t.value)\n    t.lexer.lineno += subs_made + t.value.count(\"\\n\")\n    return t\n\n# Character constant 'c' or L'c' or unicode 
editions thereof\ndef t_CPP_CHAR(t):\n    r'(u8|u|U|L)?\\'([^\\\\\\n]|(\\\\(.|\\n)))*?\\''\n    t.lexer.lineno += t.value.count(\"\\n\")\n    return t\n\n# Comment\ndef t_CPP_COMMENT1(t):\n    r'(/\\*(.|\\n)*?\\*/)'\n    ncr = t.value.count(\"\\n\")\n    t.lexer.lineno += ncr\n    return t\n\n# Line comment\ndef t_CPP_COMMENT2(t):\n    r'(//[^\\n]*)'\n    return t\n    \ndef t_error(t):\n    t.type = t.value[0]\n    t.value = t.value[0]\n    t.lexer.skip(1)\n    return t\n\n\n# Python 2/3 compatible way of importing a subpackage\noldsyspath = sys.path\nsys.path = [ os.path.join( os.path.dirname( os.path.abspath(__file__) ), \"ply\" ) ] + sys.path\nfrom ply import lex, yacc\nfrom ply.lex import LexToken\nsys.path = oldsyspath\ndel oldsyspath\n\n# -----------------------------------------------------------------------------\n# trigraph()\n# \n# Given an input string, this function replaces all trigraph sequences. \n# The following mapping is used:\n#\n#     ??=    #\n#     ??/    \\\n#     ??'    ^\n#     ??(    [\n#     ??)    ]\n#     ??!    
|\n#     ??<    {\n#     ??>    }\n#     ??-    ~\n# -----------------------------------------------------------------------------\n\n_trigraph_pat = re.compile(r'''\\?\\?[=/\\'\\(\\)\\!<>\\-]''')\n_trigraph_rep = {\n    '=':'#',\n    '/':'\\\\',\n    \"'\":'^',\n    '(':'[',\n    ')':']',\n    '!':'|',\n    '<':'{',\n    '>':'}',\n    '-':'~'\n}\n\ndef trigraph(input):\n    return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)\n\ndef default_lexer():\n    return lex.lex(optimize=in_production)\n\n# ------------------------------------------------------------------\n# Macro object\n#\n# This object holds information about preprocessor macros\n#\n#    .name      - Macro name (string)\n#    .value     - Macro value (a list of tokens)\n#    .arglist   - List of argument names\n#    .variadic  - Boolean indicating whether or not variadic macro\n#    .vararg    - Name of the variadic parameter\n#\n# When a macro is created, the macro replacement token sequence is\n# pre-scanned and used to create patch lists that are later used\n# during macro expansion\n# ------------------------------------------------------------------\n\nclass Macro(object):\n    def __init__(self,name,value,arglist=None,variadic=False):\n        self.name = name\n        self.value = value\n        self.arglist = arglist\n        self.variadic = variadic\n        if variadic:\n            self.vararg = arglist[-1]\n        self.source = None\n        self.lineno = None\n    def __repr__(self):\n        return \"%s(%s)=%s\" % (self.name, self.arglist, self.value)\n\n# ------------------------------------------------------------------\n# Preprocessor event hooks\n#\n# Override these to customise preprocessing\n# ------------------------------------------------------------------\n\nclass Action(object):\n    \"\"\"What kind of abort processing to do in OutputDirective\"\"\"\n    IgnoreAndPassThrough = 0\n    \"\"\"Abort processing (don't execute), but pass the directive through to 
output\"\"\"\n    IgnoreAndRemove = 1\n    \"\"\"Abort processing (don't execute), and remove from output\"\"\"\n\nclass OutputDirective(Exception):\n    \"\"\"Raise this exception to abort processing of a preprocessor directive and\n    to instead output it as is into the output\"\"\"\n    def __init__(self, action):\n        self.action = action\n\nclass PreprocessorHooks(object):\n    \"\"\"Override these in your subclass of Preprocessor to customise preprocessing\"\"\"\n    def __init__(self):\n        self.lastdirective = None\n\n    def on_error(self,file,line,msg):\n        \"\"\"Called when the preprocessor has encountered an error, e.g. malformed input.\n        \n        The default simply prints to stderr and increments the return code.\n        \"\"\"\n        print(\"%s:%d error: %s\" % (file,line,msg), file = sys.stderr)\n        self.return_code += 1\n        \n    def on_file_open(self,is_system_include,includepath):\n        \"\"\"Called to open a file for reading.\n        \n        This hook provides the ability to use ``chardet``, or any other mechanism,\n        to inspect a file for its text encoding, and open it appropriately. Be\n        aware that this function is used to probe for possible include file locations,\n        so ``includepath`` may not exist. If it does not, raise the appropriate\n        ``IOError`` exception.\n        \n        The default calls ``io.open(includepath, 'r', encoding = self.assume_encoding)``,\n        examines if it starts with a BOM (if so, it removes it), and returns the file\n        object opened. 
This raises the appropriate exception if the path was not found.\n        \"\"\"\n        ret = open(includepath, 'r', encoding = self.assume_encoding)\n        bom = ret.read(1)\n        #print(repr(bom))\n        if bom != '\\ufeff':\n            ret.seek(0)\n        return ret\n\n    def on_include_not_found(self,is_malformed,is_system_include,curdir,includepath):\n        \"\"\"Called when a #include wasn't found.\n        \n        Raise OutputDirective to pass through or remove, else return\n        a suitable path. Remember that Preprocessor.add_path() lets you add search paths.\n        \n        The default calls ``self.on_error()`` with a suitable error message about the\n        include file not found if ``is_malformed`` is False, else a suitable error\n        message about a malformed #include, and in both cases raises OutputDirective\n        (pass through).\n        \"\"\"\n        if is_malformed:\n            self.on_error(self.lastdirective.source,self.lastdirective.lineno, \"Malformed #include statement: %s\" % includepath)\n        else:\n            self.on_error(self.lastdirective.source,self.lastdirective.lineno, \"Include file '%s' not found\" % includepath)\n        raise OutputDirective(Action.IgnoreAndPassThrough)\n        \n    def on_unknown_macro_in_defined_expr(self,tok):\n        \"\"\"Called when an expression passed to an #if contained a defined operator\n        performed on something unknown.\n        \n        Return True if to treat it as defined, False if to treat it as undefined,\n        raise OutputDirective to pass through without execution, or return None to\n        pass through the mostly expanded #if expression apart from the unknown defined.\n        \n        The default returns False, as per the C standard.\n        \"\"\"\n        return False\n\n    def on_unknown_macro_in_expr(self,ident):\n        \"\"\"Called when an expression passed to an #if contained an unknown identifier.\n        \n        Return what 
value the expression evaluator ought to use, or return None to\n        pass through the mostly expanded #if expression.\n        \n        The default returns an integer 0, as per the C standard.\n        \"\"\"\n        return 0\n    \n    def on_unknown_macro_function_in_expr(self,ident):\n        \"\"\"Called when an expression passed to an #if contained an unknown function.\n        \n        Return a callable which will be invoked by the expression evaluator to\n        evaluate the input to the function, or return None to pass through the\n        mostly expanded #if expression.\n        \n        The default returns a lambda which returns integer 0, as per the C standard.\n        \"\"\"\n        return lambda x : 0\n    \n    def on_directive_handle(self,directive,toks,ifpassthru,precedingtoks):\n        \"\"\"Called when there is one of\n        \n        define, include, undef, ifdef, ifndef, if, elif, else, endif\n        \n        Return True to execute and remove from the output, raise OutputDirective\n        to pass through or remove without execution, or return None to execute\n        AND pass through to the output (this only works for #define, #undef).\n        \n        The default returns True (execute and remove from the output).\n\n        directive is the directive, toks is the tokens after the directive,\n        ifpassthru is whether we are in passthru mode, precedingtoks is the\n        tokens preceding the directive from the # token until the directive.\n        \"\"\"\n        self.lastdirective = directive\n        return True\n        \n    def on_directive_unknown(self,directive,toks,ifpassthru,precedingtoks):\n        \"\"\"Called when the preprocessor encounters a #directive it doesn't understand.\n        This is actually quite an extensive list as it currently only understands:\n        \n        define, include, undef, ifdef, ifndef, if, elif, else, endif\n        \n        Return True to remove from the output, raise 
OutputDirective\n        to pass through or remove, or return None to\n        pass through into the output.\n        \n        The default handles #error and #warning by printing to stderr and returning True\n        (remove from output). For everything else it returns None (pass through into output).\n\n        directive is the directive, toks is the tokens after the directive,\n        ifpassthru is whether we are in passthru mode, precedingtoks is the\n        tokens preceding the directive from the # token until the directive.\n        \"\"\"\n        if directive.value == 'error':\n            print(\"%s:%d error: %s\" % (directive.source,directive.lineno,''.join(tok.value for tok in toks)), file = sys.stderr)\n            self.return_code += 1\n            return True\n        elif directive.value == 'warning':\n            print(\"%s:%d warning: %s\" % (directive.source,directive.lineno,''.join(tok.value for tok in toks)), file = sys.stderr)\n            return True\n        return None\n        \n    def on_potential_include_guard(self,macro):\n        \"\"\"Called when the preprocessor encounters an #ifndef macro or an #if !defined(macro)\n        as the first non-whitespace thing in a file. Unlike the other hooks, macro is a string,\n        not a token.\n        \"\"\"\n        pass\n    \n    def on_comment(self,tok):\n        \"\"\"Called when the preprocessor encounters a comment token. You can modify the token\n        in place. You must return True to let the comment pass through, else it will be removed.\n        \n        Returning False or None modifies the token to become whitespace, becoming a single space\n        if the comment is a block comment, else a single new line if the comment is a line comment.\n        \"\"\"\n        return None\n\n"
  },
  {
    "path": "pcpp/parsetab.py",
    "content": "\n# parsetab.py\n# This file is automatically generated. Do not edit.\n# pylint: disable=W,C,R\n_tabversion = '3.10'\n\n_lr_method = 'LALR'\n\n_lr_signature = 'leftCPP_COMMAleftCPP_QUESTIONCPP_COLONleftCPP_LOGICALORleftCPP_LOGICALANDleftCPP_BARleftCPP_HATleftCPP_AMPERSANDleftCPP_EQUALITYCPP_INEQUALITYleftCPP_LESSCPP_LESSEQUALCPP_GREATERCPP_GREATEREQUALleftCPP_LSHIFTCPP_RSHIFTleftCPP_PLUSCPP_MINUSleftCPP_STARCPP_FSLASHCPP_PERCENTrightUPLUSUMINUSCPP_EXCLAMATIONCPP_TILDECPP_AMPERSAND CPP_BAR CPP_CHAR CPP_COLON CPP_COMMA CPP_EQUALITY CPP_EXCLAMATION CPP_FSLASH CPP_GREATER CPP_GREATEREQUAL CPP_HAT CPP_ID CPP_INEQUALITY CPP_LESS CPP_LESSEQUAL CPP_LOGICALAND CPP_LOGICALOR CPP_LPAREN CPP_LSHIFT CPP_MINUS CPP_PERCENT CPP_PLUS CPP_QUESTION CPP_RPAREN CPP_RSHIFT CPP_STAR CPP_STRING CPP_TILDE PP_NUMBERexpression : PP_NUMBERexpression : CPP_CHAR\\n    expression : CPP_STRING\\n              | CPP_LESS expression CPP_GREATER\\n    expression : CPP_LPAREN expression CPP_RPARENexpression : CPP_PLUS expression %prec UPLUSexpression : CPP_MINUS expression %prec UMINUS\\n    expression : CPP_EXCLAMATION expression\\n              | CPP_TILDE expression\\n    \\n    expression : expression CPP_STAR expression\\n              | expression CPP_FSLASH expression\\n              | expression CPP_PERCENT expression\\n              | expression CPP_PLUS expression\\n              | expression CPP_MINUS expression\\n              | expression CPP_LSHIFT expression\\n              | expression CPP_RSHIFT expression\\n              | expression CPP_LESS expression\\n              | expression CPP_LESSEQUAL expression\\n              | expression CPP_GREATER expression\\n              | expression CPP_GREATEREQUAL expression\\n              | expression CPP_EQUALITY expression\\n              | expression CPP_INEQUALITY expression\\n              | expression CPP_AMPERSAND expression\\n              | expression CPP_HAT expression\\n              | expression CPP_BAR 
expression\\n              | expression CPP_LOGICALAND expression\\n              | expression CPP_LOGICALOR expression\\n              | expression CPP_COMMA expression\\n    expression : expression CPP_QUESTION expression CPP_COLON expressionexpression : CPP_ID CPP_LPAREN expression CPP_RPARENexpression : CPP_ID'\n    \n_lr_action_items = {'PP_NUMBER':([0,5,6,7,8,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,38,59,62,],[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,]),'CPP_CHAR':([0,5,6,7,8,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,38,59,62,],[3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,]),'CPP_STRING':([0,5,6,7,8,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,38,59,62,],[4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,]),'CPP_LESS':([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,],[5,19,-1,-2,-3,5,5,5,5,5,5,-31,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,19,19,-6,-7,-8,-9,5,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,19,19,19,19,19,19,19,19,19,-4,-5,19,5,-30,19,]),'CPP_LPAREN':([0,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,38,59,62,],[6,6,6,6,6,6,6,38,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,]),'CPP_PLUS':([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,],[7,15,-1,-2,-3,7,7,7,7,7,7,-31,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,15,15,-6,-7,-8,-9,7,-10,-11,-12,-13,-14,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,7,-5,15,7,-30,15,]),'CPP_MINUS':([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,],[8,16,-1,-2,-3,8,8,8,8,8,8,-31,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,16,16,-6,-7,-8,-9,8,-10,-11,-12,-13,-14,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,8,-5,16,8,-30,16,]),'CPP_EXCLAMATION':([0,5,6,7,8,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,38,59,62,],[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,]),'CPP_TILDE':([0,5,6,7,8,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,38,59,62,],[10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,]),'CPP_ID':([0,5,6,7,8,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,38,59,62,],[11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,]),'$end':([1,2,3,4,11,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,59,60,63,64,],[0,-1,-2,-3,-31,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-4,-5,-30,-29,]),'CPP_STAR':([1,2,3,4,11,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,63,64,],[12,-1,-2,-3,-31,12,12,-6,-7,-8,-9,-10,-11,-12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,-4,-5,12,-30,12,]),'CPP_FSLASH':([1,2,3,4,11,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,63,64,],[13,-1,-2,-3,-31,13,13,-6,-7,-8,-9,-10,-11,-12,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,-4,-5,13,-30,13,]),'CPP_PERCENT':([1,2,3,4,11,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,63,64,],[14,-1,-2,-3,-31,14,14,-6,-7,-8,-9,-10,-11,-12,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,-4,-5,14,-30,14,]),'CPP_LSHIFT':([1,2,3,4,11,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,63,64,],[17,-1,-2,-3,-31,17,17,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,17,17,17,17,17,17,17,17,17,17,17,17,17,-4,-5,17,-30,17,]),'CPP_RSHIFT':([1,2,3,4,11,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,63,64,],[1
8,-1,-2,-3,-31,18,18,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,18,18,18,18,18,18,18,18,18,18,18,18,18,-4,-5,18,-30,18,]),'CPP_LESSEQUAL':([1,2,3,4,11,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,63,64,],[20,-1,-2,-3,-31,20,20,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,20,20,20,20,20,20,20,20,20,-4,-5,20,-30,20,]),'CPP_GREATER':([1,2,3,4,11,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,63,64,],[21,-1,-2,-3,-31,59,21,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,21,21,21,21,21,21,21,21,21,-4,-5,21,-30,21,]),'CPP_GREATEREQUAL':([1,2,3,4,11,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,63,64,],[22,-1,-2,-3,-31,22,22,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,22,22,22,22,22,22,22,22,22,-4,-5,22,-30,22,]),'CPP_EQUALITY':([1,2,3,4,11,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,63,64,],[23,-1,-2,-3,-31,23,23,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,23,23,23,23,23,23,23,-4,-5,23,-30,23,]),'CPP_INEQUALITY':([1,2,3,4,11,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,63,64,],[24,-1,-2,-3,-31,24,24,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,24,24,24,24,24,24,24,-4,-5,24,-30,24,]),'CPP_AMPERSAND':([1,2,3,4,11,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,63,64,],[25,-1,-2,-3,-31,25,25,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,25,25,25,25,25,25,-4,-5,25,-30,25,]),'CPP_HAT':([1,2,3,4,11,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,63,64,],[26,-1,-2,-3,-31,26,26,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,26,26,26,26,26,-4,-5,26,-30,26,]),'CPP_BAR':([1,2,3,4,11,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,63,64,],[27,-1,-2,-3,-
31,27,27,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,27,27,27,27,-4,-5,27,-30,27,]),'CPP_LOGICALAND':([1,2,3,4,11,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,63,64,],[28,-1,-2,-3,-31,28,28,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,28,28,28,-4,-5,28,-30,28,]),'CPP_LOGICALOR':([1,2,3,4,11,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,63,64,],[29,-1,-2,-3,-31,29,29,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,29,29,-4,-5,29,-30,29,]),'CPP_COMMA':([1,2,3,4,11,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,63,64,],[30,-1,-2,-3,-31,30,30,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,30,-4,-5,30,-30,-29,]),'CPP_QUESTION':([1,2,3,4,11,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,63,64,],[31,-1,-2,-3,-31,31,31,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,31,31,-4,-5,31,-30,-29,]),'CPP_RPAREN':([2,3,4,11,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,59,60,61,63,64,],[-1,-2,-3,-31,60,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-4,-5,63,-30,-29,]),'CPP_COLON':([2,3,4,11,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,63,64,],[-1,-2,-3,-31,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,62,-4,-5,-30,-29,]),}\n\n_lr_action = {}\nfor _k, _v in _lr_action_items.items():\n   for _x,_y in zip(_v[0],_v[1]):\n      if not _x in _lr_action:  _lr_action[_x] = {}\n      _lr_action[_x][_k] = _y\ndel _lr_action_items\n\n_lr_goto_items = 
{'expression':([0,5,6,7,8,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,38,59,62,],[1,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,61,48,64,]),}\n\n_lr_goto = {}\nfor _k, _v in _lr_goto_items.items():\n   for _x, _y in zip(_v[0], _v[1]):\n       if not _x in _lr_goto: _lr_goto[_x] = {}\n       _lr_goto[_x][_k] = _y\ndel _lr_goto_items\n_lr_productions = [\n  (\"S' -> expression\",\"S'\",1,None,None,None),\n  ('expression -> PP_NUMBER','expression',1,'p_expression_number','evaluator.py',397),\n  ('expression -> CPP_CHAR','expression',1,'p_expression_character','evaluator.py',405),\n  ('expression -> CPP_STRING','expression',1,'p_expression_string','evaluator.py',410),\n  ('expression -> CPP_LESS expression CPP_GREATER','expression',3,'p_expression_string','evaluator.py',411),\n  ('expression -> CPP_LPAREN expression CPP_RPAREN','expression',3,'p_expression_group','evaluator.py',416),\n  ('expression -> CPP_PLUS expression','expression',2,'p_expression_uplus','evaluator.py',420),\n  ('expression -> CPP_MINUS expression','expression',2,'p_expression_uminus','evaluator.py',424),\n  ('expression -> CPP_EXCLAMATION expression','expression',2,'p_expression_unop','evaluator.py',429),\n  ('expression -> CPP_TILDE expression','expression',2,'p_expression_unop','evaluator.py',430),\n  ('expression -> expression CPP_STAR expression','expression',3,'p_expression_binop','evaluator.py',442),\n  ('expression -> expression CPP_FSLASH expression','expression',3,'p_expression_binop','evaluator.py',443),\n  ('expression -> expression CPP_PERCENT expression','expression',3,'p_expression_binop','evaluator.py',444),\n  ('expression -> expression CPP_PLUS expression','expression',3,'p_expression_binop','evaluator.py',445),\n  ('expression -> expression CPP_MINUS expression','expression',3,'p_expression_binop','evaluator.py',446),\n  ('expression -> expression CPP_LSHIFT 
expression','expression',3,'p_expression_binop','evaluator.py',447),\n  ('expression -> expression CPP_RSHIFT expression','expression',3,'p_expression_binop','evaluator.py',448),\n  ('expression -> expression CPP_LESS expression','expression',3,'p_expression_binop','evaluator.py',449),\n  ('expression -> expression CPP_LESSEQUAL expression','expression',3,'p_expression_binop','evaluator.py',450),\n  ('expression -> expression CPP_GREATER expression','expression',3,'p_expression_binop','evaluator.py',451),\n  ('expression -> expression CPP_GREATEREQUAL expression','expression',3,'p_expression_binop','evaluator.py',452),\n  ('expression -> expression CPP_EQUALITY expression','expression',3,'p_expression_binop','evaluator.py',453),\n  ('expression -> expression CPP_INEQUALITY expression','expression',3,'p_expression_binop','evaluator.py',454),\n  ('expression -> expression CPP_AMPERSAND expression','expression',3,'p_expression_binop','evaluator.py',455),\n  ('expression -> expression CPP_HAT expression','expression',3,'p_expression_binop','evaluator.py',456),\n  ('expression -> expression CPP_BAR expression','expression',3,'p_expression_binop','evaluator.py',457),\n  ('expression -> expression CPP_LOGICALAND expression','expression',3,'p_expression_binop','evaluator.py',458),\n  ('expression -> expression CPP_LOGICALOR expression','expression',3,'p_expression_binop','evaluator.py',459),\n  ('expression -> expression CPP_COMMA expression','expression',3,'p_expression_binop','evaluator.py',460),\n  ('expression -> expression CPP_QUESTION expression CPP_COLON expression','expression',5,'p_expression_conditional','evaluator.py',506),\n  ('expression -> CPP_ID CPP_LPAREN expression CPP_RPAREN','expression',4,'p_expression_function_call','evaluator.py',518),\n  ('expression -> CPP_ID','expression',1,'p_expression_identifier','evaluator.py',525),\n]\n"
  },
  {
    "path": "pcpp/pcmd.py",
    "content": "#!/usr/bin/python\n# Python C99 conforming preprocessor command line\n# (C) 2017-2026 Niall Douglas http://www.nedproductions.biz/\n# Started: March 2017\n\nimport sys, argparse, traceback, os, copy, io, re\nif __name__ == '__main__' and __package__ is None:\n    sys.path.append( os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) ) )\nfrom pcpp.preprocessor import Preprocessor, OutputDirective, Action\n\nversion='1.31'\n\n__all__ = []\n\nclass FileAction(argparse.Action):\n    def __init__(self, option_strings, dest, **kwargs):\n        super(FileAction, self).__init__(option_strings, dest, **kwargs)\n        \n    def __call__(self, parser, namespace, values, option_string=None):\n        if getattr(namespace, self.dest)[0] == sys.stdin:\n            items = []\n        else:\n            items = copy.copy(getattr(namespace, self.dest))\n        items += [argparse.FileType('rt')(value) for value in values]\n        setattr(namespace, self.dest, items)\n\nclass CmdPreprocessor(Preprocessor):\n    def __init__(self, argv):\n        if len(argv) < 2:\n            argv = [argv[0], '--help']\n        argp = argparse.ArgumentParser(prog='pcpp',\n            description=\n    '''A pure universal Python C (pre-)preprocessor implementation very useful for\n    pre-preprocessing header only C++ libraries into single file includes and\n    other such build or packaging stage malarky.''',\n            epilog=\n    '''Note that so pcpp can stand in for other preprocessor tooling, it\n    ignores any arguments it does not understand.''')\n        argp.add_argument('inputs', metavar = 'input', default = [sys.stdin], nargs = '*', action = FileAction, help = 'Files to preprocess (use \\'-\\' for stdin)')\n        argp.add_argument('-o', dest = 'output', metavar = 'path', type = argparse.FileType('wt'), default=sys.stdout, nargs = '?', help = 'Output to a file instead of stdout')\n        argp.add_argument('-D', dest = 'defines', metavar = 'macro[=val]', 
nargs = 1, action = 'append', help = 'Predefine name as a macro [with value]')\n        argp.add_argument('-U', dest = 'undefines', metavar = 'macro', nargs = 1, action = 'append', help = 'Pre-undefine name as a macro')\n        argp.add_argument('-N', dest = 'nevers', metavar = 'macro', nargs = 1, action = 'append', help = 'Never define name as a macro, even if defined during the preprocessing.')\n        argp.add_argument('-I', dest = 'includes', metavar = 'path', nargs = 1, action = 'append', help = \"Path to search for unfound #include's\")\n        #argp.add_argument('--passthru', dest = 'passthru', action = 'store_true', help = 'Pass through everything unexecuted except for #include and include guards (which need to be the first thing in an include file')\n        argp.add_argument('--passthru-defines', dest = 'passthru_defines', action = 'store_true', help = 'Pass through but still execute #defines and #undefs if not always removed by preprocessor logic')\n        argp.add_argument('--passthru-unfound-includes', dest = 'passthru_unfound_includes', action = 'store_true', help = 'Pass through #includes not found without execution')\n        argp.add_argument('--passthru-unknown-exprs', dest = 'passthru_undefined_exprs', action = 'store_true', help = 'Unknown macros in expressions cause preprocessor logic to be passed through instead of executed by treating unknown macros as 0L')\n        argp.add_argument('--passthru-comments', dest = 'passthru_comments', action = 'store_true', help = 'Pass through comments unmodified')\n        argp.add_argument('--passthru-magic-macros', dest = 'passthru_magic_macros', action = 'store_true', help = 'Pass through double underscore magic macros unmodified')\n        argp.add_argument('--passthru-has-include', dest = 'passthru_has_include', action = 'store_true', help = 'Pass through __has_include expressions unmodified')\n        argp.add_argument('--passthru-includes', dest = 'passthru_includes', metavar = '<regex>', default 
= None, nargs = 1, help = \"Regular expression for which #includes to not expand. #includes, if found, are always executed\")\n        argp.add_argument('--disable-auto-pragma-once', dest = 'auto_pragma_once_disabled', action = 'store_true', default = False, help = 'Disable the heuristics which auto apply #pragma once to #include files wholly wrapped in an obvious include guard macro')\n        argp.add_argument('--enable-include-next', dest = 'include_next_enabled', action = 'store_true', default = False, help = 'Enable #include_next support, which you should try to avoid using')\n        argp.add_argument('--line-directive', dest = 'line_directive', metavar = 'form', default = '#line', nargs = '?', help = \"Form of line directive to use, defaults to #line, specify nothing to disable output of line directives\")\n        argp.add_argument('--debug', dest = 'debug', action = 'store_true', help = 'Generate a pcpp_debug.log file logging execution')\n        argp.add_argument('--time', dest = 'time', action = 'store_true', help = 'Print the time it took to #include each file')\n        argp.add_argument('--filetimes', dest = 'filetimes', metavar = 'path', type = argparse.FileType('wt'), default=None, nargs = '?', help = 'Write CSV file with time spent inside each included file, inclusive and exclusive')\n        argp.add_argument('--compress', dest = 'compress', action = 'store_true', help = 'Make output as small as possible')\n        argp.add_argument('--assume-input-encoding', dest = 'assume_input_encoding', metavar = '<encoding>', default = None, nargs = 1, help = 'The text encoding to assume inputs are in')\n        argp.add_argument('--output-encoding', dest = 'output_encoding', metavar = '<encoding>', default = None, nargs = 1, help = 'The text encoding to use when writing files')\n        argp.add_argument('--write-bom', dest = 'write_bom', action = 'store_true', help = 'Prefix any output with a Unicode BOM')\n        argp.add_argument('--trigraphs', dest = 
'enable_trigraphs', action = 'store_true', help = 'Enable processing trigraphs')\n        argp.add_argument('--version', action='version', version='pcpp ' + version)\n        args = argp.parse_known_args(argv[1:])\n        #print(args)\n        for arg in args[1]:\n            print(\"NOTE: Argument %s not known, ignoring!\" % arg, file = sys.stderr)\n\n        self.args = args[0]\n        super(CmdPreprocessor, self).__init__()\n        \n        # Override Preprocessor instance variables\n        self.define(\"__PCPP_VERSION__ \" + version)\n        self.define(\"__PCPP_ALWAYS_FALSE__ 0\")\n        self.define(\"__PCPP_ALWAYS_TRUE__ 1\")\n        if self.args.debug:\n            self.debugout = open(\"pcpp_debug.log\", \"wt\")\n        self.auto_pragma_once_enabled = not self.args.auto_pragma_once_disabled\n        self.include_next_enabled = self.args.include_next_enabled\n        self.line_directive = self.args.line_directive\n        if self.line_directive is not None and self.line_directive.lower() in ('nothing', 'none', ''):\n            self.line_directive = None\n        if self.args.passthru_includes is not None:\n            self.passthru_includes = re.compile(self.args.passthru_includes[0])\n        self.compress = 2 if self.args.compress else 0\n        if self.args.passthru_magic_macros:\n            self.undef('__DATE__')\n            self.undef('__TIME__')\n            self.expand_linemacro = False\n            self.expand_filemacro = False\n            self.expand_countermacro = False\n        self.passthru_expr_has_include = self.args.passthru_has_include\n        if self.args.assume_input_encoding is not None:\n            self.args.assume_input_encoding = self.args.assume_input_encoding[0]\n            self.assume_encoding = self.args.assume_input_encoding\n            if len(self.args.inputs) == 1:\n                # Reopen our input files with the appropriate encoding\n                _ = self.on_file_open(False, self.args.inputs[0].name)\n    
            self.args.inputs[0].close()\n                self.args.inputs[0] = _\n            if self.args.output_encoding is None:\n                self.args.output_encoding = self.args.assume_input_encoding\n        if self.args.output_encoding is not None:\n            self.args.output_encoding = self.args.output_encoding[0]\n            # Reopen our output file with the appropriate encoding\n            _ = io.open(self.args.output.name, 'w', encoding = self.args.output_encoding)\n            self.args.output.close()\n            self.args.output = _\n            if self.args.write_bom:\n                self.args.output.write('\\ufeff')\n        self.enable_trigraphs = self.args.enable_trigraphs\n        \n        # My own instance variables\n        self.bypass_ifpassthru = False\n        self.potential_include_guard = None\n\n        if self.args.defines:\n            self.args.defines = [x[0] for x in self.args.defines]\n            for d in self.args.defines:\n                if '=' not in d:\n                    d += '=1'\n                d = d.replace('=', ' ', 1)\n                self.define(d)\n        if self.args.undefines:\n            self.args.undefines = [x[0] for x in self.args.undefines]\n            for d in self.args.undefines:\n                self.undef(d)\n        if self.args.nevers:\n            self.args.nevers = [x[0] for x in self.args.nevers]\n        if self.args.includes:\n            self.args.includes = [x[0] for x in self.args.includes]\n            for d in self.args.includes:\n                self.add_path(d)\n\n        try:\n            if len(self.args.inputs) == 1:\n                self.parse(self.args.inputs[0])\n            else:\n                input = ''\n                for i in self.args.inputs:\n                    input += '#include \"' + i.name + '\"\\n'\n                self.parse(input)\n            self.write(self.args.output)\n        except:\n            print(traceback.print_exc(10), file = sys.stderr)\n      
      print(\"\\nINTERNAL PREPROCESSOR ERROR AT AROUND %s:%d, FATALLY EXITING NOW\\n\"\n                % (self.lastdirective.source, self.lastdirective.lineno), file = sys.stderr)\n            sys.exit(-99)\n        finally:\n            for i in self.args.inputs:\n                i.close()\n            if self.args.output != sys.stdout:\n                self.args.output.close()\n        \n        if self.args.time:\n            print(\"\\nTime report:\")\n            print(\"============\")\n            for n in range(0, len(self.include_times)):\n                if n == 0:\n                    print(\"top level: %f seconds\" % self.include_times[n].elapsed)\n                elif self.include_times[n].depth == 1:\n                    print(\"\\n %s: %f seconds (%f%%)\" % (self.include_times[n].included_path, self.include_times[n].elapsed, 100 * self.include_times[n].elapsed / self.include_times[0].elapsed))\n                else:\n                    print(\"%s%s: %f seconds\" % (' ' * self.include_times[n].depth, self.include_times[n].included_path, self.include_times[n].elapsed))\n            print(\"\\nPragma once files (including heuristically applied):\")\n            print(\"====================================================\")\n            for i in self.include_once:\n                print(\" \", i)\n            print()\n        if self.args.filetimes:\n            print('\"Total seconds\",\"Self seconds\",\"File size\",\"File path\"', file = self.args.filetimes)\n            filetimes = {}\n            currentfiles = []\n            for n in range(0, len(self.include_times)):\n                while self.include_times[n].depth < len(currentfiles):\n                    currentfiles.pop()\n                if self.include_times[n].depth > len(currentfiles) - 1:\n                    currentfiles.append(self.include_times[n].included_abspath)\n                #print()\n                #for path in currentfiles:\n                #    print(\"currentfiles =\", 
path)\n                path = currentfiles[-1]\n                if path in filetimes:\n                    filetimes[path][0] += self.include_times[n].elapsed\n                    filetimes[path][1] += self.include_times[n].elapsed\n                else:\n                    filetimes[path] = [self.include_times[n].elapsed, self.include_times[n].elapsed]\n                if self.include_times[n].elapsed > 0 and len(currentfiles) > 1:\n                    #print(\"Removing child %f from parent %s = %f\" % (self.include_times[n].elapsed, currentfiles[-2], filetimes[currentfiles[-2]]))\n                    filetimes[currentfiles[-2]][1] -= self.include_times[n].elapsed\n            filetimes = [(v[0],v[1],k) for k,v in filetimes.items()]\n            filetimes.sort(reverse=True)\n            for t,s,p in filetimes:\n                print(('%f,%f,%d,\"%s\"' % (t, s, os.stat(p).st_size, p)), file = self.args.filetimes)\n    def on_include_not_found(self,is_malformed,is_system_include,curdir,includepath):\n        if self.args.passthru_unfound_includes:\n            raise OutputDirective(Action.IgnoreAndPassThrough)\n        return super(CmdPreprocessor, self).on_include_not_found(is_malformed,is_system_include,curdir,includepath)\n\n    def on_unknown_macro_in_defined_expr(self,tok):\n        if self.args.undefines:\n            if tok.value in self.args.undefines:\n                return False\n        if self.args.passthru_undefined_exprs:\n            return None  # Pass through as expanded as possible\n        return super(CmdPreprocessor, self).on_unknown_macro_in_defined_expr(tok)\n        \n    def on_unknown_macro_in_expr(self,ident):\n        if self.args.undefines:\n            if ident in self.args.undefines:\n                return super(CmdPreprocessor, self).on_unknown_macro_in_expr(ident)\n        if self.args.passthru_undefined_exprs:\n            return None  # Pass through as expanded as possible\n        return super(CmdPreprocessor, 
self).on_unknown_macro_in_expr(ident)\n        \n    def on_unknown_macro_function_in_expr(self,ident):\n        if self.args.undefines:\n            if ident in self.args.undefines:\n                return super(CmdPreprocessor, self).on_unknown_macro_function_in_expr(ident)\n        if self.args.passthru_undefined_exprs:\n            return None  # Pass through as expanded as possible\n        return super(CmdPreprocessor, self).on_unknown_macro_function_in_expr(ident)\n        \n    def on_directive_handle(self,directive,toks,ifpassthru,precedingtoks):\n        if ifpassthru:\n            if directive.value == 'if' or directive.value == 'elif' or directive == 'else' or directive.value == 'endif':\n                self.bypass_ifpassthru = len([tok for tok in toks if tok.value == '__PCPP_ALWAYS_FALSE__' or tok.value == '__PCPP_ALWAYS_TRUE__']) > 0\n            if not self.bypass_ifpassthru and (directive.value == 'define' or directive.value == 'undef'):\n                if toks[0].value != self.potential_include_guard:\n                    raise OutputDirective(Action.IgnoreAndPassThrough)  # Don't execute anything with effects when inside an #if expr with undefined macro\n        if (directive.value == 'define' or directive.value == 'undef') and self.args.nevers:\n            if toks[0].value in self.args.nevers:\n                raise OutputDirective(Action.IgnoreAndPassThrough)\n        if self.args.passthru_defines:\n            super(CmdPreprocessor, self).on_directive_handle(directive,toks,ifpassthru,precedingtoks)\n            return None  # Pass through where possible\n        return super(CmdPreprocessor, self).on_directive_handle(directive,toks,ifpassthru,precedingtoks)\n\n    def on_directive_unknown(self,directive,toks,ifpassthru,precedingtoks):\n        if ifpassthru:\n            return None  # Pass through\n        return super(CmdPreprocessor, self).on_directive_unknown(directive,toks,ifpassthru,precedingtoks)\n\n    def 
on_potential_include_guard(self,macro):\n        self.potential_include_guard = macro\n        return super(CmdPreprocessor, self).on_potential_include_guard(macro)\n\n    def on_comment(self,tok):\n        if self.args.passthru_comments:\n            return True  # Pass through\n        return super(CmdPreprocessor, self).on_comment(tok)\n\ndef main(argv=None):\n    if argv is None:\n        argv = sys.argv\n    p = CmdPreprocessor(argv)\n    return p.return_code\n        \nif __name__ == \"__main__\":\n    sys.exit(main(sys.argv))\n"
  },
  {
    "path": "pcpp/preprocessor.py",
    "content": "#!/usr/bin/python\n# Python C99 conforming preprocessor useful for generating single include files\n# (C) 2017-2026 Niall Douglas http://www.nedproductions.biz/\n# and (C) 2007-2017 David Beazley http://www.dabeaz.com/\n# Started: Feb 2017\n#\n# This C preprocessor was originally written by David Beazley and the\n# original can be found at https://github.com/dabeaz/ply/blob/master/ply/cpp.py\n# This edition substantially improves on standards conforming output,\n# getting quite close to what clang or GCC outputs.\n\nimport sys, os, re, codecs, time, copy, traceback\nif __name__ == '__main__' and __package__ is None:\n    sys.path.append( os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) ) )\nfrom pcpp.parser import STRING_TYPES, default_lexer, trigraph, Macro, Action, OutputDirective, PreprocessorHooks\nfrom pcpp.evaluator import Evaluator\n\nimport io\nFILE_TYPES = io.IOBase\nclock = time.process_time\n\n__all__ = ['Preprocessor', 'PreprocessorHooks', 'OutputDirective', 'Action', 'Evaluator']\n\n# ------------------------------------------------------------------\n# File inclusion timings\n#\n# Useful for figuring out how long a sequence of preprocessor inclusions actually is\n# ------------------------------------------------------------------\n\nclass FileInclusionTime(object):\n    \"\"\"The seconds taken to #include another file\"\"\"\n    def __init__(self,including_path,included_path,included_abspath,depth):\n        self.including_path = including_path\n        self.included_path = included_path\n        self.included_abspath = included_abspath\n        self.depth = depth\n        self.elapsed = 0.0\n\n# ------------------------------------------------------------------\n# Preprocessor object\n#\n# Object representing a preprocessor.  
Contains macro definitions,\n# include directories, and other information\n# ------------------------------------------------------------------\n\nclass Preprocessor(PreprocessorHooks):    \n    def __init__(self,lexer=None):\n        super(Preprocessor, self).__init__()\n        if lexer is None:\n            lexer = default_lexer()\n        self.lexer = lexer\n        self.evaluator = Evaluator(self.lexer)\n        self.macros = { }\n        self.path = []           # list of -I formal search paths for includes\n        self.temp_path = []      # list of temporary search paths for includes\n        self.rewrite_paths = [(re.escape(os.path.abspath('') + os.sep) + '(.*)', '\\\\1')]\n        self.passthru_includes = None\n        self.passthru_expr_has_include = False\n        self.include_once = {}\n        self.include_depth = 0\n        self.include_times = []  # list of FileInclusionTime\n        self.return_code = 0\n        self.debugout = None\n        self.auto_pragma_once_enabled = True\n        self.include_next_enabled = False\n        self.line_directive = '#line'\n        self.compress = False\n        self.assume_encoding = None\n        self.enable_trigraphs = False\n\n        # Probe the lexer for selected tokens\n        self.__lexprobe()\n\n        tm = time.localtime()\n        self.define(\"__DATE__ \\\"%s\\\"\" % time.strftime(\"%b %d %Y\",tm))\n        self.define(\"__TIME__ \\\"%s\\\"\" % time.strftime(\"%H:%M:%S\",tm))\n        self.define(\"__PCPP__ 1\")\n        self.expand_linemacro = True\n        self.expand_filemacro = True\n        self.expand_countermacro = True\n        self.linemacro = 0\n        self.linemacrodepth = 0\n        self.countermacro = 0\n        self.current_include_next_unique_ids = []\n        self.parser = None\n\n    @staticmethod\n    def __file_unique_id(fh):\n        s = os.stat(fh.fileno())\n        return s.st_ino ^ s.st_size\n    \n    # 
-----------------------------------------------------------------------------\n    # tokenize()\n    #\n    # Utility function. Given a string of text, tokenize into a list of tokens\n    # -----------------------------------------------------------------------------\n\n    def tokenize(self,text):\n        \"\"\"Utility function. Given a string of text, tokenize into a list of tokens\"\"\"\n        tokens = []\n        self.lexer.input(text)\n        while True:\n            tok = self.lexer.token()\n            if not tok: break\n            tok.source = ''\n            tokens.append(tok)\n        return tokens\n\n    # ----------------------------------------------------------------------\n    # __lexprobe()\n    #\n    # This method probes the preprocessor lexer object to discover\n    # the token types of symbols that are important to the preprocessor.\n    # If this works right, the preprocessor will simply \"work\"\n    # with any suitable lexer regardless of how tokens have been named.\n    # ----------------------------------------------------------------------\n\n    def __lexprobe(self):\n\n        # Determine the token type for identifiers\n        self.lexer.input(\"identifier\")\n        tok = self.lexer.token()\n        if not tok or tok.value != \"identifier\":\n            print(\"Couldn't determine identifier type\")\n        else:\n            self.t_ID = tok.type\n\n        # Determine the token type for integers\n        self.lexer.input(\"12345\")\n        tok = self.lexer.token()\n        if not tok or int(tok.value) != 12345:\n            print(\"Couldn't determine integer type\")\n        else:\n            self.t_INTEGER = tok.type\n            self.t_INTEGER_TYPE = type(tok.value)\n\n        # Determine the token type for character\n        self.lexer.input(\"'a'\")\n        tok = self.lexer.token()\n        if not tok or tok.value != \"'a'\":\n            print(\"Couldn't determine character type\")\n        else:\n            
self.t_CHAR = tok.type\n            \n        # Determine the token type for strings enclosed in double quotes\n        self.lexer.input(\"\\\"filename\\\"\")\n        tok = self.lexer.token()\n        if not tok or tok.value != \"\\\"filename\\\"\":\n            print(\"Couldn't determine string type\")\n        else:\n            self.t_STRING = tok.type\n\n        # Determine the token type for whitespace--if any\n        self.lexer.input(\"  \")\n        tok = self.lexer.token()\n        if not tok or tok.value != \"  \":\n            self.t_SPACE = None\n        else:\n            self.t_SPACE = tok.type\n\n        # Determine the token type for newlines\n        self.lexer.input(\"\\n\")\n        tok = self.lexer.token()\n        if not tok or tok.value != \"\\n\":\n            self.t_NEWLINE = None\n            print(\"Couldn't determine token for newlines\")\n        else:\n            self.t_NEWLINE = tok.type\n\n        # Determine the token type for line continuations\n        self.lexer.input(\"\\\\     \\n\")\n        tok = self.lexer.token()\n        if not tok or tok.value != \"     \":\n            self.t_LINECONT = None\n            print(\"Couldn't determine token for line continuations\")\n        else:\n            self.t_LINECONT = tok.type\n\n        self.t_WS = (self.t_SPACE, self.t_NEWLINE, self.t_LINECONT)\n\n        self.lexer.input(\"##\")\n        tok = self.lexer.token()\n        if not tok or tok.value != \"##\":\n            print(\"Couldn't determine token for token pasting operator\")\n        else:\n            self.t_DPOUND = tok.type\n\n        self.lexer.input(\"?\")\n        tok = self.lexer.token()\n        if not tok or tok.value != \"?\":\n            print(\"Couldn't determine token for ternary operator\")\n        else:\n            self.t_TERNARY = tok.type\n\n        self.lexer.input(\":\")\n        tok = self.lexer.token()\n        if not tok or tok.value != \":\":\n            print(\"Couldn't determine token for 
ternary operator\")\n        else:\n            self.t_COLON = tok.type\n\n        self.lexer.input(\"/* comment */\")\n        tok = self.lexer.token()\n        if not tok or tok.value != \"/* comment */\":\n            print(\"Couldn't determine comment type\")\n        else:\n            self.t_COMMENT1 = tok.type\n\n        self.lexer.input(\"// comment\")\n        tok = self.lexer.token()\n        if not tok or tok.value != \"// comment\":\n            print(\"Couldn't determine comment type\")\n        else:\n            self.t_COMMENT2 = tok.type\n            \n        self.t_COMMENT = (self.t_COMMENT1, self.t_COMMENT2)\n\n        # Check for other characters used by the preprocessor\n        chars = [ '<','>','#','##','\\\\','(',')',',','.']\n        for c in chars:\n            self.lexer.input(c)\n            tok = self.lexer.token()\n            if not tok or tok.value != c:\n                print(\"Unable to lex '%s' required for preprocessor\" % c)\n\n    # ----------------------------------------------------------------------\n    # add_path()\n    #\n    # Adds a search path to the preprocessor.  \n    # ----------------------------------------------------------------------\n\n    def add_path(self,path):\n        \"\"\"Adds a search path to the preprocessor. \"\"\"\n        self.path.append(path)\n        # If the search path being added is relative, or has a common ancestor to the\n        # current working directory, add a rewrite to relativise includes from this\n        # search path\n        relpath = None\n        try:\n            relpath = os.path.relpath(path)\n        except: pass\n        if relpath is not None:\n            self.rewrite_paths += [(re.escape(os.path.abspath(path) + os.sep) + '(.*)', os.path.join(relpath, '\\\\1'))]\n\n\n    # ----------------------------------------------------------------------\n    # group_lines()\n    #\n    # Given an input string, this function splits it into lines.  
Trailing whitespace\n    # is removed. This function forms the lowest level of the preprocessor---grouping\n    # text into a line-by-line format.\n    # ----------------------------------------------------------------------\n\n    def group_lines(self,input,abssource):\n        r\"\"\"Given an input string, this function splits it into lines.  Trailing whitespace\n        is removed. This function forms the lowest level of the preprocessor---grouping\n        text into a line-by-line format.\n        \"\"\"\n        lex = self.lexer.clone()\n        lines = [x.rstrip() for x in input.splitlines()]\n\n        input = \"\\n\".join(lines)\n        lex.input(input)\n        lex.lineno = 1\n\n        current_line = []\n        while True:\n            tok = lex.token()\n            if not tok:\n                break\n            tok.source = abssource\n            current_line.append(tok)\n            if tok.type in self.t_WS and tok.value == '\\n':\n                yield current_line\n                current_line = []\n\n        if current_line:\n            nltok = copy.copy(current_line[-1])\n            nltok.type = self.t_NEWLINE\n            nltok.value = '\\n'\n            current_line.append(nltok)\n            yield current_line\n\n    # ----------------------------------------------------------------------\n    # tokenstrip()\n    # \n    # Remove leading/trailing whitespace tokens from a token list\n    # ----------------------------------------------------------------------\n\n    def tokenstrip(self,tokens):\n        \"\"\"Remove leading/trailing whitespace tokens from a token list\"\"\"\n        i = 0\n        while i < len(tokens) and tokens[i].type in self.t_WS:\n            i += 1\n        del tokens[:i]\n        i = len(tokens)-1\n        while i >= 0 and tokens[i].type in self.t_WS:\n            i -= 1\n        del tokens[i+1:]\n        return tokens\n\n\n    # ----------------------------------------------------------------------\n    # 
collect_args()\n    #\n    # Collects comma separated arguments from a list of tokens.   The arguments\n    # must be enclosed in parenthesis.  Returns a tuple (tokencount,args,positions)\n    # where tokencount is the number of tokens consumed, args is a list of arguments,\n    # and positions is a list of integers containing the starting index of each\n    # argument.  Each argument is represented by a list of tokens.\n    #\n    # When collecting arguments, leading and trailing whitespace is removed\n    # from each argument.  \n    #\n    # This function properly handles nested parenthesis and commas---these do not\n    # define new arguments.\n    # ----------------------------------------------------------------------\n\n    def collect_args(self,tokenlist,ignore_errors=False):\n        \"\"\"Collects comma separated arguments from a list of tokens.   The arguments\n        must be enclosed in parenthesis.  Returns a tuple (tokencount,args,positions)\n        where tokencount is the number of tokens consumed, args is a list of arguments,\n        and positions is a list of integers containing the starting index of each\n        argument.  Each argument is represented by a list of tokens.\n        \n        When collecting arguments, leading and trailing whitespace is removed\n        from each argument.  
\n        \n        This function properly handles nested parenthesis and commas---these do not\n        define new arguments.\"\"\"\n        args = []\n        positions = []\n        current_arg = []\n        nesting = 1\n        tokenlen = len(tokenlist)\n    \n        # Search for the opening '('.\n        i = 0\n        while (i < tokenlen) and (tokenlist[i].type in self.t_WS):\n            i += 1\n\n        if (i < tokenlen) and (tokenlist[i].value == '('):\n            positions.append(i+1)\n        else:\n            if not ignore_errors:\n                self.on_error(tokenlist[0].source,tokenlist[0].lineno,\"Missing '(' in macro arguments\")\n            return 0, [], []\n\n        i += 1\n\n        while i < tokenlen:\n            t = tokenlist[i]\n            if t.value == '(':\n                current_arg.append(t)\n                nesting += 1\n            elif t.value == ')':\n                nesting -= 1\n                if nesting == 0:\n                    args.append(self.tokenstrip(current_arg))\n                    positions.append(i)\n                    return i+1,args,positions\n                current_arg.append(t)\n            elif t.value == ',' and nesting == 1:\n                args.append(self.tokenstrip(current_arg))\n                positions.append(i+1)\n                current_arg = []\n            else:\n                current_arg.append(t)\n            i += 1\n    \n        # Missing end argument\n        if not ignore_errors:\n            self.on_error(tokenlist[-1].source,tokenlist[-1].lineno,\"Missing ')' in macro arguments\")\n        return 0, [],[]\n\n    # ----------------------------------------------------------------------\n    # macro_prescan()\n    #\n    # Examine the macro value (token sequence) and identify patch points\n    # This is used to speed up macro expansion later on---we'll know\n    # right away where to apply patches to the value to form the expansion\n    # 
----------------------------------------------------------------------\n    \n    def macro_prescan(self,macro):\n        \"\"\"Examine the macro value (token sequence) and identify patch points\n        This is used to speed up macro expansion later on---we'll know\n        right away where to apply patches to the value to form the expansion\"\"\"\n        macro.patch     = []             # Standard macro arguments \n        macro.str_patch = []             # String conversion expansion\n        macro.var_comma_patch = []       # Variadic macro comma patch\n        i = 0\n        #print(\"BEFORE\", macro.value)\n        #print(\"BEFORE\", [x.value for x in macro.value])\n        while i < len(macro.value):\n            if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:\n                argnum = macro.arglist.index(macro.value[i].value)\n                # Conversion of argument to a string\n                j = i - 1\n                while j >= 0 and macro.value[j].type in self.t_WS:\n                    j -= 1\n                if j >= 0 and macro.value[j].value == '#':\n                    macro.value[i] = copy.copy(macro.value[i])\n                    macro.value[i].type = self.t_STRING\n                    while i > j:\n                        del macro.value[j]\n                        i -= 1\n                    macro.str_patch.append((argnum,i))\n                    continue\n                # Concatenation\n                elif (i > 0 and macro.value[i-1].value == '##'):\n                    macro.patch.append(('t',argnum,i))\n                    i += 1\n                    continue\n                elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):\n                    macro.patch.append(('t',argnum,i))\n                    i += 1\n                    continue\n                # Standard expansion\n                else:\n                    macro.patch.append(('e',argnum,i))\n            elif macro.value[i].value 
== '##':\n                if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \\\n                        ((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \\\n                        (macro.value[i+1].value == macro.vararg):\n                    macro.var_comma_patch.append(i-1)\n            i += 1\n        macro.patch.sort(key=lambda x: x[2],reverse=True)\n        #print(\"AFTER\", macro.value)\n        #print(\"AFTER\", [x.value for x in macro.value])\n        #print(macro.patch)\n\n    # ----------------------------------------------------------------------\n    # macro_expand_args()\n    #\n    # Given a Macro and list of arguments (each a token list), this method\n    # returns an expanded version of a macro.  The return value is a token sequence\n    # representing the replacement macro tokens\n    # ----------------------------------------------------------------------\n\n    def macro_expand_args(self,macro,args,expanding_from):\n        \"\"\"Given a Macro and list of arguments (each a token list), this method\n        returns an expanded version of a macro.  The return value is a token sequence\n        representing the replacement macro tokens\"\"\"\n        # Make a copy of the macro token sequence\n        rep = [copy.copy(_x) for _x in macro.value]\n\n        # Make string expansion patches.  
These do not alter the length of the replacement sequence\n        str_expansion = {}\n        for argnum, i in macro.str_patch:\n            if argnum not in str_expansion:\n                # Strip all non-space whitespace before stringization\n                tokens = copy.copy(args[argnum])\n                for j in range(len(tokens)):\n                    if tokens[j].type in self.t_WS and tokens[j].type != self.t_LINECONT:\n                        tokens[j].value = ' '\n                # Collapse all multiple whitespace too\n                j = 0\n                while j < len(tokens) - 1:\n                    if tokens[j].type in self.t_WS and tokens[j+1].type in self.t_WS:\n                        del tokens[j+1]\n                    else:\n                        j += 1\n                str = \"\".join([x.value for x in tokens])\n                str = str.replace(\"\\\\\",\"\\\\\\\\\").replace('\"', '\\\\\"')\n                str_expansion[argnum] = '\"' + str + '\"'\n            rep[i] = copy.copy(rep[i])\n            rep[i].value = str_expansion[argnum]\n\n        # Make the variadic macro comma patch.  If the variadic macro argument is empty, we get rid\n        comma_patch = False\n        if macro.variadic and not args[-1]:\n            for i in macro.var_comma_patch:\n                rep[i] = None\n                comma_patch = True\n\n        # Make all other patches.   The order of these matters.  It is assumed that the patch list\n        # has been sorted in reverse order of patch location since replacements will cause the\n        # size of the replacement sequence to expand from the patch point.\n        \n        expanded = { }\n        #print(\"***\", macro)\n        #print(macro.patch)\n        for ptype, argnum, i in macro.patch:\n            #print([x.value for x in rep])\n            # Concatenation.   Argument is left unexpanded\n            if ptype == 't':\n                rep[i:i+1] = args[argnum]\n            # Normal expansion.  
Argument is macro expanded first\n            elif ptype == 'e':\n                #print('*** Function macro arg', rep[i], 'replace with', args[argnum], 'which expands into', self.expand_macros(copy.copy(args[argnum])))\n                if argnum not in expanded:\n                    expanded[argnum] = self.expand_macros(copy.copy(args[argnum]), expanding_from)\n                rep[i:i+1] = expanded[argnum]\n\n        # Get rid of removed comma if necessary\n        if comma_patch:\n            rep = [_i for _i in rep if _i]\n            \n        # Do a token concatenation pass, stitching any tokens separated by ## into a single token\n        while len(rep) and rep[0].type == self.t_DPOUND:\n            del rep[0]\n        while len(rep) and rep[-1].type == self.t_DPOUND:\n            del rep[-1]\n        i = 1\n        stitched = False\n        while i < len(rep) - 1:\n            if rep[i].type == self.t_DPOUND:\n                j = i + 1\n                while rep[j].type == self.t_DPOUND:\n                    j += 1\n                rep[i-1] = copy.copy(rep[i-1])\n                rep[i-1].type = None\n                rep[i-1].value += rep[j].value\n                while j >= i:\n                    del rep[i]\n                    j -= 1\n                stitched = True\n            else:\n                i += 1\n        if stitched:\n            # Stitched tokens will have unknown type, so figure those out now\n            i = 0\n            lex = self.lexer.clone()\n            while i < len(rep):\n                if rep[i].type is None:\n                    lex.input(rep[i].value)\n                    toks = []\n                    while True:\n                        tok = lex.token()\n                        if not tok:\n                            break\n                        toks.append(tok)\n                    if len(toks) != 1:\n                        # Split it once again\n                        while len(toks) > 1:\n                            
rep.insert(i+1, copy.copy(rep[i]))\n                            rep[i+1].value = toks[-1].value\n                            rep[i+1].type = toks[-1].type\n                            toks.pop()\n                        rep[i].value = toks[0].value\n                        rep[i].type = toks[0].type\n                    else:\n                        rep[i].type = toks[0].type\n                i += 1\n\n        #print rep\n        return rep\n\n\n    # ----------------------------------------------------------------------\n    # expand_macros()\n    #\n    # Given a list of tokens, this function performs macro expansion.\n    # ----------------------------------------------------------------------\n\n    def expand_macros(self,tokens,expanding_from=[]):\n        \"\"\"Given a list of tokens, this function performs macro expansion.\"\"\"\n        # Each token needs to track from which macros it has been expanded from to prevent recursion\n        for tok in tokens:\n            if not hasattr(tok, 'expanded_from'):\n                tok.expanded_from = []\n        i = 0\n        #print(\"*** EXPAND MACROS in\", \"\".join([t.value for t in tokens]), \"expanding_from=\", expanding_from)\n        #print(tokens)\n        #print([(t.value, t.expanded_from) for t in tokens])\n        while i < len(tokens):\n            t = tokens[i]\n            if self.linemacrodepth == 0:\n                self.linemacro = t.lineno\n            self.linemacrodepth = self.linemacrodepth + 1\n            if t.type == self.t_ID:\n                if t.value in self.macros and t.value not in t.expanded_from and t.value not in expanding_from:\n                    # Yes, we found a macro match\n                    m = self.macros[t.value]\n                    if m.arglist is None:\n                        # A simple macro\n                        rep = [copy.copy(_x) for _x in m.value]\n                        ex = self.expand_macros(rep, expanding_from + [t.value])\n                        
#print(\"\\nExpanding macro\", m, \"\\ninto\", ex, \"\\nreplacing\", tokens[i:i+1])\n                        for e in ex:\n                            e.source = t.source\n                            e.lineno = t.lineno\n                            if not hasattr(e, 'expanded_from'):\n                                e.expanded_from = []\n                            e.expanded_from.append(t.value)\n                        tokens[i:i+1] = ex\n                    else:\n                        # A macro with arguments\n                        j = i + 1\n                        while j < len(tokens) and (tokens[j].type in self.t_WS or tokens[j].type in self.t_COMMENT):\n                            j += 1\n                        # A function like macro without an invocation list is to be ignored\n                        if j == len(tokens) or tokens[j].value != '(':\n                            i = j\n                        else:\n                            tokcount,args,positions = self.collect_args(tokens[j:], True)\n                            if tokcount == 0:\n                                # Unclosed parameter list, just bail out\n                                break\n                            if (not m.variadic\n                                # A no arg or single arg consuming macro is permitted to be expanded with nothing\n                                and (args != [[]] or len(m.arglist) > 1)\n                                and len(args) !=  len(m.arglist)):\n                                self.on_error(t.source,t.lineno,\"Macro %s requires %d arguments but was passed %d\" % (t.value,len(m.arglist),len(args)))\n                                i = j + tokcount\n                            elif m.variadic and len(args) < len(m.arglist)-1:\n                                if len(m.arglist) > 2:\n                                    self.on_error(t.source,t.lineno,\"Macro %s must have at least %d arguments\" % (t.value, len(m.arglist)-1))\n                
                else:\n                                    self.on_error(t.source,t.lineno,\"Macro %s must have at least %d argument\" % (t.value, len(m.arglist)-1))\n                                i = j + tokcount\n                            else:\n                                if m.variadic:\n                                    if len(args) == len(m.arglist)-1:\n                                        args.append([])\n                                    else:\n                                        args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]\n                                        del args[len(m.arglist):]\n                                else:\n                                    # If we called a single arg macro with empty, fake extend args\n                                    while len(args) < len(m.arglist):\n                                        args.append([])\n                                        \n                                # Get macro replacement text\n                                rep = self.macro_expand_args(m, args, expanding_from)\n                                ex = self.expand_macros(rep, expanding_from + [t.value])\n                                for e in ex:\n                                    e.source = t.source\n                                    e.lineno = t.lineno\n                                    if not hasattr(e, 'expanded_from'):\n                                        e.expanded_from = []\n                                    e.expanded_from.append(t.value)\n                                # A non-conforming extension implemented by the GCC and clang preprocessors\n                                # is that an expansion of a macro with arguments where the following token is\n                                # an identifier inserts a space between the expansion and the identifier. 
This\n                                # differs from Boost.Wave incidentally (see https://github.com/ned14/pcpp/issues/29)\n                                if len(tokens) > j+tokcount and tokens[j+tokcount].type in self.t_ID:\n                                    #print(\"*** token after expansion is\", tokens[j+tokcount])\n                                    newtok = copy.copy(tokens[j+tokcount])\n                                    newtok.type = self.t_SPACE\n                                    newtok.value = ' '\n                                    ex.append(newtok)\n                                #print(\"\\nExpanding macro\", m, \"\\n\\ninto\", ex, \"\\n\\nreplacing\", tokens[i:j+tokcount])\n                                tokens[i:j+tokcount] = ex\n                    self.linemacrodepth = self.linemacrodepth - 1\n                    if self.linemacrodepth == 0:\n                        self.linemacro = 0\n                    continue\n                elif self.expand_linemacro and t.value == '__LINE__':\n                    t.type = self.t_INTEGER\n                    t.value = self.t_INTEGER_TYPE(self.linemacro)\n                elif self.expand_countermacro and t.value == '__COUNTER__':\n                    t.type = self.t_INTEGER\n                    t.value = self.t_INTEGER_TYPE(self.countermacro)\n                    self.countermacro += 1\n                \n            i += 1\n            self.linemacrodepth = self.linemacrodepth - 1\n            if self.linemacrodepth == 0:\n                self.linemacro = 0\n        return tokens\n\n    # ----------------------------------------------------------------------    \n    # evalexpr()\n    # \n    # Evaluate an expression token sequence for the purposes of evaluating\n    # integral expressions.\n    # ----------------------------------------------------------------------\n\n    def evalexpr(self,tokens):\n        \"\"\"Evaluate an expression token sequence for the purposes of evaluating\n        
integral expressions.\"\"\"\n        if not tokens:\n            self.on_error('unknown', 0, \"Empty expression\")\n            return (0, None)\n        # tokens = tokenize(line)\n        # Search for defined macros\n        partial_expansion = False\n        def replace_defined(tokens):\n            i = 0\n            while i < len(tokens):\n                if tokens[i].type == self.t_ID and tokens[i].value == 'defined':\n                    j = i + 1\n                    needparen = False\n                    result = \"0L\"\n                    while j < len(tokens):\n                        if tokens[j].type in self.t_WS:\n                            j += 1\n                            continue\n                        elif tokens[j].type == self.t_ID:\n                            if tokens[j].value in self.macros:\n                                result = \"1L\"\n                            elif not self.passthru_expr_has_include and tokens[j].value == '__has_include':\n                                result = \"1L\"\n                            else:\n                                repl = self.on_unknown_macro_in_defined_expr(tokens[j])\n                                if repl is None:\n                                    partial_expansion = True\n                                    result = 'defined('+tokens[j].value+')'\n                                else:\n                                    result = \"1L\" if repl else \"0L\"\n                            if not needparen: break\n                        elif tokens[j].value == '(':\n                            needparen = True\n                        elif tokens[j].value == ')':\n                            break\n                        else:\n                            self.on_error(tokens[i].source,tokens[i].lineno,\"Malformed defined()\")\n                        j += 1\n                    if result.startswith('defined'):\n                        tokens[i].type = self.t_ID\n                      
  tokens[i].value = result\n                    else:\n                        tokens[i].type = self.t_INTEGER\n                        tokens[i].value = self.t_INTEGER_TYPE(result)\n                    del tokens[i+1:j+1]\n                i += 1\n            return tokens\n        # Replace any defined(macro) before macro expansion\n        tokens = replace_defined(tokens)\n        tokens = self.expand_macros(tokens)\n        # Replace any defined(macro) after macro expansion\n        tokens = replace_defined(tokens)\n        if not self.passthru_expr_has_include:\n            # We need to specially handle _has_include(<...>) because the inner <...> parses as an invalid expression.\n            # We do this by injecting it as a string, and we undo that later.\n            def replace_has_include(tokens):\n                i = 0\n                while i < len(tokens):\n                    if tokens[i].type == self.t_ID and tokens[i].value == '__has_include':\n                        j = i + 1\n                        needparen = False\n                        bracketpos = -1\n                        while j < len(tokens):\n                            if tokens[j].type in self.t_WS:\n                                j += 1\n                                continue\n                            elif tokens[j].type == self.t_ID:\n                                assert bracketpos >= 0\n                                # Convert the <id> into a string\n                                tokens[j].type = self.t_STRING\n                                tokens[j].value = '\"' + ''.join([tokens[x].value for x in range(bracketpos, j + 1)])\n                                del tokens[bracketpos:j]\n                                j = bracketpos\n                            elif tokens[j].value == '<':\n                                bracketpos = j\n                            elif tokens[j].value == '>':\n                                assert bracketpos > 0\n                        
        tokens[bracketpos].value += ''.join([tokens[x].value for x in range(bracketpos + 1, j + 1)]) + '\"'\n                                del tokens[bracketpos + 1:j + 1]\n                                j = bracketpos\n                                bracketpos = -1\n                            elif tokens[j].value == '(':\n                                needparen = True\n                            elif tokens[j].value == ')':\n                                break\n                            elif tokens[j].type != self.t_STRING:\n                                self.on_error(tokens[i].source,tokens[i].lineno,\"Malformed __has_include()\")\n                            j += 1\n                    i += 1\n                return tokens\n            tokens = replace_has_include(tokens)\n        if not tokens:\n            return (0, None)\n        class IndirectToMacroHook(object):\n            def __init__(self, p):\n                self.__preprocessor = p\n                self.partial_expansion = False\n            def __contains__(self, key):\n                return True\n            def __getitem__(self, key):\n                if key.startswith('defined('):\n                    self.partial_expansion = True\n                    return 0\n                repl = self.__preprocessor.on_unknown_macro_in_expr(key)\n                #print(\"*** IndirectToMacroHook[\", key, \"] returns\", repl, file = sys.stderr)\n                if repl is None:\n                    self.partial_expansion = True\n                    return key\n                return repl\n        evalvars = IndirectToMacroHook(self)\n        class IndirectToHasInclude(object):\n            def __init__(self, p):\n                self.__preprocessor = p\n            def __call__(self, x):\n                #print(\"*** has_include\", x, file = sys.stderr)\n                if x.startswith('\"<') and x.endswith('>\"'):\n                    # Undo our special handling from earlier\n                    
x = x[1:-1]\n                x = self.__preprocessor.tokenize(x)\n                exists = [ p for p in self.__preprocessor.include(x, x, include_exists_only=True) ]\n                return 1 if exists[0] else 0\n        class IndirectToMacroFunctionHook(object):\n            def __init__(self, p):\n                self.__preprocessor = p\n                self.partial_expansion = False\n            def __contains__(self, key):\n                return True\n            def __getitem__(self, key):\n                if not self.__preprocessor.passthru_expr_has_include and key == '__has_include':\n                    return IndirectToHasInclude(self.__preprocessor)\n                repl = self.__preprocessor.on_unknown_macro_function_in_expr(key)\n                #print(\"*** IndirectToMacroFunctionHook[\", key, \"] returns\", repl, file = sys.stderr)\n                if repl is None:\n                    self.partial_expansion = True\n                    return key\n                return repl\n        evalfuncts = IndirectToMacroFunctionHook(self)\n        try:\n            result = self.evaluator(tokens, functions = evalfuncts, identifiers = evalvars).value()\n            partial_expansion = partial_expansion or evalvars.partial_expansion or evalfuncts.partial_expansion\n        except OutputDirective:\n            raise\n        except Exception as e:\n            partial_expansion = partial_expansion or evalvars.partial_expansion or evalfuncts.partial_expansion\n            if not partial_expansion:\n                self.on_error(tokens[0].source,tokens[0].lineno,\"Could not evaluate expression due to %s (passed to evaluator: '%s')\" % (repr(e), ''.join([tok.value for tok in tokens])))\n            result = 0\n        return (result, tokens) if partial_expansion else (result, None)\n\n    # ----------------------------------------------------------------------\n    # parsegen()\n    #\n    # Parse an input string\n    # 
----------------------------------------------------------------------\n    def parsegen(self,input,source=None,abssource=None):\n        \"\"\"Parse an input string\"\"\"\n        rewritten_source = source\n        if abssource:\n            rewritten_source = abssource\n            for rewrite in self.rewrite_paths:\n                temp = re.sub(rewrite[0], rewrite[1], rewritten_source)\n                if temp != abssource:\n                    rewritten_source = temp\n                    if os.sep != '/':\n                        rewritten_source = rewritten_source.replace(os.sep, '/')\n                    break\n\n        # Replace trigraph sequences\n        t = trigraph(input) if self.enable_trigraphs else input\n        lines = self.group_lines(t, rewritten_source)\n\n        if not source:\n            source = \"\"\n        if not rewritten_source:\n            rewritten_source = \"\"\n            \n        my_include_times_idx = len(self.include_times)\n        self.include_times.append(FileInclusionTime(self.macros['__FILE__'] if '__FILE__' in self.macros else None, source, abssource, self.include_depth))\n        self.include_depth += 1\n        my_include_time_begin = clock()\n        if self.expand_filemacro:\n            self.define(\"__FILE__ \\\"%s\\\"\" % rewritten_source)\n\n        self.source = abssource\n        chunk = []\n        enable = True\n        iftrigger = False\n        ifpassthru = False\n        class ifstackentry(object):\n            def __init__(self,enable,iftrigger,ifpassthru,startlinetoks):\n                self.enable = enable\n                self.iftrigger = iftrigger\n                self.ifpassthru = ifpassthru\n                self.rewritten = False\n                self.startlinetoks = startlinetoks\n        ifstack = []\n        # True until any non-whitespace output or anything with effects happens.\n        at_front_of_file = True\n        # True if auto pragma once still a possibility for this #include\n        
auto_pragma_once_possible = self.auto_pragma_once_enabled\n        # =(MACRO, 0) means #ifndef MACRO or #if !defined(MACRO) seen, =(MACRO,1) means #define MACRO seen\n        include_guard = None\n        self.on_potential_include_guard(None)\n\n        for x in lines:\n            all_whitespace = True\n            skip_auto_pragma_once_possible_check = False\n            # Handle comments\n            for i,tok in enumerate(x):\n                if tok.type in self.t_COMMENT:\n                    if not self.on_comment(tok):\n                        if tok.type == self.t_COMMENT1:\n                            tok.value = ' '\n                        elif tok.type == self.t_COMMENT2:\n                            tok.value = '\\n'\n                        tok.type = 'CPP_WS'\n            # Skip over whitespace\n            for i,tok in enumerate(x):\n                if tok.type not in self.t_WS and tok.type not in self.t_COMMENT:\n                    all_whitespace = False\n                    break\n            output_and_expand_line = True\n            output_unexpanded_line = False\n            if tok.value == '#':\n                precedingtoks = [ tok ]\n                output_and_expand_line = False\n                try:\n                    # Preprocessor directive      \n                    i += 1\n                    while i < len(x) and x[i].type in self.t_WS:\n                        precedingtoks.append(x[i])\n                        i += 1\n                    dirtokens = self.tokenstrip(x[i:])\n                    if dirtokens:\n                        name = dirtokens[0].value\n                        args = self.tokenstrip(dirtokens[1:])\n                    \n                        if self.debugout is not None:\n                            print(\"%d:%d:%d %s:%d #%s %s\" % (enable, iftrigger, ifpassthru, dirtokens[0].source, dirtokens[0].lineno, dirtokens[0].value, \"\".join([tok.value for tok in args])), file = self.debugout)\n                     
       #print(ifstack)\n\n                        handling = self.on_directive_handle(dirtokens[0],args,ifpassthru,precedingtoks)\n                        assert handling == True or handling == None\n                    else:\n                        name = \"\"\n                        args = []\n                        raise OutputDirective(Action.IgnoreAndRemove)\n                        \n                    if name == 'define':\n                        at_front_of_file = False\n                        if enable:\n                            for tok in self.expand_macros(chunk):\n                                yield tok\n                            chunk = []\n                            if include_guard and include_guard[1] == 0:\n                                if include_guard[0] == args[0].value and len(args) == 1:\n                                    include_guard = (args[0].value, 1)\n                                    # If ifpassthru is only turned on due to this include guard, turn it off\n                                    if ifpassthru and not ifstack[-1].ifpassthru:\n                                        ifpassthru = False\n                            self.define(args)\n                            if self.debugout is not None:\n                                print(\"%d:%d:%d %s:%d      %s\" % (enable, iftrigger, ifpassthru, dirtokens[0].source, dirtokens[0].lineno, repr(self.macros[args[0].value])), file = self.debugout)\n                            if handling is None:\n                                for tok in x:\n                                    yield tok\n                    elif name == 'include' or (self.include_next_enabled and name == 'include_next'):\n                        if enable:\n                            for tok in self.expand_macros(chunk):\n                                yield tok\n                            chunk = []\n                            oldfile = self.macros['__FILE__'] if '__FILE__' in self.macros else 
None\n                            if args and args[0].value != '<' and args[0].type != self.t_STRING:\n                                args = self.tokenstrip(self.expand_macros(args))\n                            # print('***', ''.join([x.value for x in args]), file = sys.stderr)\n                            for tok in self.include(args, x,\n                                                    include_next_is_active = (name == 'include_next' and abssource is not None)):\n                                yield tok\n                            if oldfile is not None:\n                                self.macros['__FILE__'] = oldfile\n                            self.source = abssource\n                    elif name == 'undef':\n                        at_front_of_file = False\n                        if enable:\n                            for tok in self.expand_macros(chunk):\n                                yield tok\n                            chunk = []\n                            self.undef(args)\n                            if handling is None:\n                                for tok in x:\n                                    yield tok\n                    elif name == 'ifdef':\n                        at_front_of_file = False\n                        ifstack.append(ifstackentry(enable,iftrigger,ifpassthru,x))\n                        if enable:\n                            ifpassthru = False\n                            if not args[0].value in self.macros and (self.passthru_expr_has_include or args[0].value != '__has_include'):\n                                res = self.on_unknown_macro_in_defined_expr(args[0])\n                                if res is None:\n                                    ifpassthru = True\n                                    ifstack[-1].rewritten = True\n                                    raise OutputDirective(Action.IgnoreAndPassThrough)\n                                elif res is True:\n                                    
iftrigger = True\n                                else:\n                                    enable = False\n                                    iftrigger = False\n                            else:\n                                iftrigger = True\n                    elif name == 'ifndef':\n                        if not ifstack and at_front_of_file:\n                            self.on_potential_include_guard(args[0].value)\n                            include_guard = (args[0].value, 0)\n                        at_front_of_file = False\n                        ifstack.append(ifstackentry(enable,iftrigger,ifpassthru,x))\n                        if enable:\n                            ifpassthru = False\n                            if args[0].value in self.macros or (not self.passthru_expr_has_include and args[0].value == '__has_include'):\n                                enable = False\n                                iftrigger = False\n                            else:\n                                res = self.on_unknown_macro_in_defined_expr(args[0])\n                                if res is None:\n                                    ifpassthru = True\n                                    ifstack[-1].rewritten = True\n                                    raise OutputDirective(Action.IgnoreAndPassThrough)\n                                elif res is True:\n                                    enable = False\n                                    iftrigger = False\n                                else:\n                                    iftrigger = True\n                    elif name == 'if':\n                        if not ifstack and at_front_of_file:\n                            if args[0].value == '!' 
and args[1].value == 'defined':\n                                n = 2\n                                if args[n].value == '(': n += 1\n                                self.on_potential_include_guard(args[n].value)\n                                include_guard = (args[n].value, 0)\n                        at_front_of_file = False\n                        ifstack.append(ifstackentry(enable,iftrigger,ifpassthru,x))\n                        if enable:\n                            iftrigger = False\n                            ifpassthru = False\n                            result, rewritten = self.evalexpr(args)\n                            if rewritten is not None:\n                                x = x[:i+2] + rewritten + [x[-1]]\n                                x[i+1] = copy.copy(x[i+1])\n                                x[i+1].type = self.t_SPACE\n                                x[i+1].value = ' '\n                                ifpassthru = True\n                                ifstack[-1].rewritten = True\n                                raise OutputDirective(Action.IgnoreAndPassThrough)\n                            if not result:\n                                enable = False\n                            else:\n                                iftrigger = True\n                    elif name == 'elif':\n                        at_front_of_file = False\n                        if ifstack:\n                            if ifstack[-1].enable:     # We only pay attention if outer \"if\" allows this\n                                if enable and not ifpassthru:         # If already true, we flip enable False\n                                    enable = False\n                                elif not iftrigger:   # If False, but not triggered yet, we'll check expression\n                                    result, rewritten = self.evalexpr(args)\n                                    if rewritten is not None:\n                                        enable = True\n    
                                    if not ifpassthru:\n                                            # This is a passthru #elif after a False #if, so convert to an #if\n                                            x[i].value = 'if'\n                                        x = x[:i+2] + rewritten + [x[-1]]\n                                        x[i+1] = copy.copy(x[i+1])\n                                        x[i+1].type = self.t_SPACE\n                                        x[i+1].value = ' '\n                                        ifpassthru = True\n                                        ifstack[-1].rewritten = True\n                                        raise OutputDirective(Action.IgnoreAndPassThrough)\n                                    if ifpassthru:\n                                        # If this elif can only ever be true, simulate that\n                                        if result:\n                                            newtok = copy.copy(x[i+3])\n                                            newtok.type = self.t_INTEGER\n                                            newtok.value = self.t_INTEGER_TYPE(result)\n                                            x = x[:i+2] + [newtok] + [x[-1]]\n                                            raise OutputDirective(Action.IgnoreAndPassThrough)\n                                        # Otherwise elide\n                                        enable = False\n                                    elif result:\n                                        enable  = True\n                                        iftrigger = True\n                        else:\n                            self.on_error(dirtokens[0].source,dirtokens[0].lineno,\"Misplaced #elif\")\n                            \n                    elif name == 'else':\n                        at_front_of_file = False\n                        if ifstack:\n                            if ifstack[-1].enable:\n                                if 
ifpassthru:\n                                    enable = True\n                                    raise OutputDirective(Action.IgnoreAndPassThrough)\n                                if enable:\n                                    enable = False\n                                elif not iftrigger:\n                                    enable = True\n                                    iftrigger = True\n                        else:\n                            self.on_error(dirtokens[0].source,dirtokens[0].lineno,\"Misplaced #else\")\n\n                    elif name == 'endif':\n                        at_front_of_file = False\n                        if ifstack:\n                            oldifstackentry = ifstack.pop()\n                            enable = oldifstackentry.enable\n                            iftrigger = oldifstackentry.iftrigger\n                            ifpassthru = oldifstackentry.ifpassthru\n                            if self.debugout is not None:\n                                print(\"%d:%d:%d %s:%d      (%s:%d %s)\" % (enable, iftrigger, ifpassthru, dirtokens[0].source, dirtokens[0].lineno,\n                                    oldifstackentry.startlinetoks[0].source, oldifstackentry.startlinetoks[0].lineno, \"\".join([n.value for n in oldifstackentry.startlinetoks])), file = self.debugout)\n                            skip_auto_pragma_once_possible_check = True\n                            if oldifstackentry.rewritten:\n                                raise OutputDirective(Action.IgnoreAndPassThrough)\n                        else:\n                            self.on_error(dirtokens[0].source,dirtokens[0].lineno,\"Misplaced #endif\")\n                    elif name == 'pragma' and args[0].value == 'once':\n                        if enable:\n                            self.include_once[self.source] = None\n                    elif enable:\n                        # Unknown preprocessor directive\n                        
output_unexpanded_line = (self.on_directive_unknown(dirtokens[0], args, ifpassthru, precedingtoks) is None)\n\n                except OutputDirective as e:\n                    if e.action == Action.IgnoreAndPassThrough:\n                        output_unexpanded_line = True\n                    elif e.action == Action.IgnoreAndRemove:\n                        pass\n                    else:\n                        assert False\n\n            # If there is ever any non-whitespace output outside an include guard, auto pragma once is not possible\n            if not skip_auto_pragma_once_possible_check and auto_pragma_once_possible and not ifstack and not all_whitespace:\n                auto_pragma_once_possible = False\n                if self.debugout is not None:\n                    print(\"%d:%d:%d %s:%d Determined that #include \\\"%s\\\" is not entirely wrapped in an include guard macro, disabling auto-applying #pragma once\" % (enable, iftrigger, ifpassthru, x[0].source, x[0].lineno, self.source), file = self.debugout)\n                \n            if output_and_expand_line or output_unexpanded_line:\n                if not all_whitespace:\n                    at_front_of_file = False\n\n                # Normal text\n                if enable:\n                    if output_and_expand_line:\n                        chunk.extend(x)\n                    elif output_unexpanded_line:\n                        for tok in self.expand_macros(chunk):\n                            yield tok\n                        chunk = []\n                        for tok in x:\n                            yield tok\n                else:\n                    # Need to extend with the same number of blank lines\n                    i = 0\n                    while i < len(x):\n                        if x[i].type not in self.t_WS:\n                            del x[i]\n                        else:\n                            i += 1\n                    chunk.extend(x)\n\n       
 for tok in self.expand_macros(chunk):\n            yield tok\n        chunk = []\n        for i in ifstack:\n            self.on_error(i.startlinetoks[0].source, i.startlinetoks[0].lineno, \"Unterminated \" + \"\".join([n.value for n in i.startlinetoks]))\n        if auto_pragma_once_possible and include_guard and include_guard[1] == 1:\n            if self.debugout is not None:\n                print(\"%d:%d:%d %s:%d Determined that #include \\\"%s\\\" is entirely wrapped in an include guard macro called %s, auto-applying #pragma once\" % (enable, iftrigger, ifpassthru, self.source, 0, self.source, include_guard[0]), file = self.debugout)\n            self.include_once[self.source] = include_guard[0]\n        elif self.auto_pragma_once_enabled and self.source not in self.include_once:\n            if self.debugout is not None:\n                print(\"%d:%d:%d %s:%d Did not auto apply #pragma once to this file due to auto_pragma_once_possible=%d, include_guard=%s\" % (enable, iftrigger, ifpassthru, self.source, 0, auto_pragma_once_possible, repr(include_guard)), file = self.debugout)\n        my_include_time_end = clock()\n        self.include_times[my_include_times_idx].elapsed = my_include_time_end - my_include_time_begin\n        self.include_depth -= 1\n\n    # ----------------------------------------------------------------------\n    # include()\n    #\n    # Implementation of file-inclusion\n    # ----------------------------------------------------------------------\n\n    def include(self,tokens,original_line,include_next_is_active=False,include_exists_only=False):\n        \"\"\"Implementation of file-inclusion\"\"\"\n        # Try to extract the filename and then process an include file\n        if not tokens:\n            return\n        if tokens:\n            if tokens[0].value != '<' and tokens[0].type != self.t_STRING:\n                tokens = self.tokenstrip(self.expand_macros(tokens))\n\n            is_system_include = False\n            if 
tokens[0].value == '<':\n                is_system_include = True\n                # Include <...>\n                i = 1\n                while i < len(tokens):\n                    if tokens[i].value == '>':\n                        break\n                    i += 1\n                else:\n                    self.on_error(tokens[0].source,tokens[0].lineno,\"Malformed #include <...>\")\n                    return\n                filename = \"\".join([x.value for x in tokens[1:i]])\n                if not include_next_is_active:\n                    # Search only formally specified paths\n                    path = self.path\n                else:\n                    # include_next triggered this, must not differentiate\n                    path = self.temp_path + self.path\n            elif tokens[0].type == self.t_STRING:\n                filename = tokens[0].value[1:-1]\n                # Search from each nested include file, as well as formally specified paths\n                path = self.temp_path + self.path\n            else:\n                p = self.on_include_not_found(True,False,self.temp_path[0] if self.temp_path else '',tokens[0].value)\n                assert p is None\n                return\n        if not path:\n            path = ['']\n        while True:\n            #print path\n            for p in path:\n                iname = os.path.join(p,filename)\n                fulliname = os.path.abspath(iname)\n                if not include_exists_only and fulliname in self.include_once:\n                    if self.debugout is not None:\n                        print(\"x:x:x x:x #include \\\"%s\\\" skipped as already seen\" % (fulliname), file = self.debugout)\n                    if self.passthru_includes is not None and self.passthru_includes.match(''.join([x.value for x in tokens])):\n                        for tok in original_line:\n                            yield tok\n                    return\n                try:\n                    
ih = self.on_file_open(is_system_include,fulliname)\n                    if include_exists_only:\n                        ih.close()\n                        yield True\n                        return\n                    unique_id = self.__file_unique_id(ih)\n                    if include_next_is_active and unique_id in self.current_include_next_unique_ids:\n                        ih.close()\n                        continue\n                    data = ih.read()\n                    ih.close()\n                    dname = os.path.dirname(fulliname)\n                    if dname:\n                        self.temp_path.insert(0,dname)\n                    self.current_include_next_unique_ids.append(unique_id)\n                    if self.passthru_includes is not None and self.passthru_includes.match(''.join([x.value for x in tokens])):\n                        for tok in original_line:\n                            yield tok\n                        for tok in self.parsegen(data,filename,fulliname):\n                            pass\n                    else:\n                        for tok in self.parsegen(data,filename,fulliname):\n                            yield tok\n                    self.current_include_next_unique_ids.remove(unique_id)\n                    if dname:\n                        del self.temp_path[0]\n                    return\n                except IOError:\n                    pass\n            else:\n                if include_exists_only:\n                    yield False\n                    return\n                p = self.on_include_not_found(False,is_system_include,self.temp_path[0] if self.temp_path else '',filename)\n                assert p is not None\n                path.append(p)\n\n    # ----------------------------------------------------------------------\n    # define()\n    #\n    # Define a new macro\n    # ----------------------------------------------------------------------\n\n    def define(self,tokens):\n        
\"\"\"Define a new macro\"\"\"\n        if isinstance(tokens,STRING_TYPES):\n            tokens = self.tokenize(tokens)\n        else:\n            tokens = [copy.copy(tok) for tok in tokens]\n        def add_macro(self, name, macro):\n            macro.source = name.source\n            macro.lineno = name.lineno\n            self.macros[name.value] = macro\n\n        linetok = tokens\n        try:\n            name = linetok[0]\n            if len(linetok) > 1:\n                mtype = linetok[1]\n            else:\n                mtype = None\n            if not mtype:\n                m = Macro(name.value,[])\n                add_macro(self, name, m)\n            elif mtype.type in self.t_WS:\n                # A normal macro\n                m = Macro(name.value,self.tokenstrip(linetok[2:]))\n                add_macro(self, name, m)\n            elif mtype.value == '(':\n                # A macro with arguments\n                tokcount, args, positions = self.collect_args(linetok[1:])\n                variadic = False\n                for a in args:\n                    if variadic:\n                        self.on_error(name.source,name.lineno,\"No more arguments may follow a variadic argument\")\n                        break\n                    astr = \"\".join([str(_i.value) for _i in a])\n                    if astr == \"...\":\n                        variadic = True\n                        a[0].type = self.t_ID\n                        a[0].value = '__VA_ARGS__'\n                        variadic = True\n                        del a[1:]\n                        continue\n                    elif astr[-3:] == \"...\" and a[0].type == self.t_ID:\n                        variadic = True\n                        del a[1:]\n                        # If, for some reason, \".\" is part of the identifier, strip off the name for the purposes\n                        # of macro expansion\n                        if a[0].value[-3:] == '...':\n                   
         a[0].value = a[0].value[:-3]\n                        continue\n                    # Empty arguments are permitted\n                    if len(a) == 0 and len(args) == 1:\n                        continue\n                    if len(a) > 1 or a[0].type != self.t_ID:\n                        self.on_error(a[0].source,a[0].lineno,\"Invalid macro argument\")\n                        break\n                else:\n                    mvalue = self.tokenstrip(linetok[1+tokcount:])\n                    i = 0\n                    while i < len(mvalue):\n                        if i+1 < len(mvalue):\n                            if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':\n                                del mvalue[i]\n                                continue\n                            elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:\n                                del mvalue[i+1]\n                        i += 1\n                    m = Macro(name.value,mvalue,[x[0].value for x in args] if args != [[]] else [],variadic)\n                    self.macro_prescan(m)\n                    add_macro(self, name, m)\n            else:\n                self.on_error(name.source,name.lineno,\"Bad macro definition\")\n        #except LookupError:\n        #    print(\"Bad macro definition\")\n        except:\n            raise\n\n    # ----------------------------------------------------------------------\n    # undef()\n    #\n    # Undefine a macro\n    # ----------------------------------------------------------------------\n\n    def undef(self,tokens):\n        \"\"\"Undefine a macro\"\"\"\n        if isinstance(tokens,STRING_TYPES):\n            tokens = self.tokenize(tokens)\n        id = tokens[0].value\n        try:\n            del self.macros[id]\n        except LookupError:\n            pass\n\n    # ----------------------------------------------------------------------\n    # parse()\n    #\n    # Parse input text.\n    # 
----------------------------------------------------------------------\n    def parse(self,input,source=None,ignore={}):\n        \"\"\"Parse input text.\"\"\"\n        if isinstance(input, FILE_TYPES):\n            if source is None:\n                source = input.name\n            input = input.read()\n        self.ignore = ignore\n        self.parser = self.parsegen(input,source,os.path.abspath(source) if source else None)\n        if source is not None:\n            dname = os.path.dirname(source)\n            self.temp_path.insert(0,dname)\n        \n    # ----------------------------------------------------------------------\n    # token()\n    #\n    # Method to return individual tokens\n    # ----------------------------------------------------------------------\n    def token(self):\n        \"\"\"Method to return individual tokens\"\"\"\n        try:\n            while True:\n                tok = next(self.parser)\n                if tok.type not in self.ignore:\n                    return tok\n        except StopIteration:\n            self.parser = None\n            return None\n            \n    def write(self, oh=sys.stdout):\n        \"\"\"Calls token() repeatedly, expanding tokens to their text and writing to the file like stream oh\"\"\"\n        lastlineno = 0\n        lastsource = None\n        done = False\n        blanklines = 0\n        while not done:\n            emitlinedirective = False\n            toks = []\n            all_ws = True\n            # Accumulate a line\n            while not done:\n                tok = self.token()\n                if not tok:\n                    done = True\n                    break\n                toks.append(tok)\n                if tok.value and tok.value[0] == '\\n':\n                    break\n                if tok.type not in self.t_WS:\n                    all_ws = False\n            if not toks:\n                break\n            if all_ws:\n                # Remove preceding whitespace so 
it becomes just a LF\n                if len(toks) > 1:\n                    tok = toks[-1]\n                    toks = [ tok ]\n                blanklines += toks[0].value.count('\\n')\n                continue\n            # Filter out line continuations, collapsing before and after if needs be\n            for n in range(len(toks)-1, -1, -1):\n                if toks[n].type in self.t_LINECONT:\n                    if n > 0 and n < len(toks) - 2 and toks[n-1].type in self.t_WS and toks[n+1].type in self.t_WS:\n                        if toks[n-1].type not in self.t_LINECONT:\n                            toks[n-1].value = toks[n-1].value[0]\n                            del toks[n:n+2]\n                    else:\n                        del toks[n]\n            # The line in toks is not all whitespace\n            emitlinedirective = (blanklines > 6) and self.line_directive is not None\n            if hasattr(toks[0], 'source'):\n                if lastsource is None:\n                    if toks[0].source is not None:\n                        emitlinedirective = True\n                    lastsource = toks[0].source\n                elif lastsource != toks[0].source:\n                    emitlinedirective = True\n                    lastsource = toks[0].source\n            # Replace consecutive whitespace in output with a single space except at any indent\n            first_ws = None\n            #print(toks)\n            for n in range(len(toks)-1, -1, -1):\n                tok = toks[n]\n                if first_ws is None:\n                    if tok.type in self.t_SPACE or len(tok.value) == 0:\n                        first_ws = n\n                else:\n                    if tok.type not in self.t_SPACE and len(tok.value) > 0:\n                        m = n + 1\n                        while m != first_ws:\n                            del toks[m]\n                            first_ws -= 1\n                        first_ws = None\n                        if 
self.compress > 0:\n                            # Collapse a token of many whitespace into single\n                            if toks[m].value and toks[m].value[0] == ' ':\n                                toks[m].value = ' '\n            if not self.compress > 1 and not emitlinedirective:\n                newlinesneeded = toks[0].lineno - lastlineno - 1\n                if newlinesneeded > 6 and self.line_directive is not None:\n                    emitlinedirective = True\n                else:\n                    while newlinesneeded > 0:\n                        oh.write('\\n')\n                        newlinesneeded -= 1\n            lastlineno = toks[0].lineno\n            # Account for those newlines in a multiline comment\n            if emitlinedirective and self.line_directive is not None:\n                oh.write(self.line_directive + ' ' + str(lastlineno) + ('' if lastsource is None else (' \"' + lastsource + '\"' )) + '\\n')\n            for tok in toks:\n                if tok.type == self.t_COMMENT1:\n                    lastlineno += tok.value.count('\\n')\n            blanklines = 0\n            #print toks[0].lineno, \n            for tok in toks:\n                #print tok.value,\n                oh.write(tok.value)\n            #print ''\n\nif __name__ == \"__main__\":\n    import doctest\n    doctest.testmod()\n\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[build-system]\nrequires = [\"setuptools\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\nname = \"pcpp\"\nversion = \"1.31\"\ndescription = \"A C99 preprocessor written in pure Python\"\nreadme = \"README.rst\"\nlicense = {file = \"LICENSE.txt\"}\nauthors = [\n    {name = \"Niall Douglas and David Beazley\"}\n]\nclassifiers = [\n    \"Development Status :: 5 - Production/Stable\",\n    \"Intended Audience :: Developers\",\n    \"Topic :: Software Development :: Build Tools\",\n    \"Programming Language :: Python :: 3\",\n    \"Programming Language :: Python :: 3 :: Only\",\n    \"Programming Language :: Python :: 3.6\",\n    \"Programming Language :: Python :: 3.7\",\n    \"Programming Language :: Python :: 3.8\",\n    \"Programming Language :: Python :: 3.9\",\n    \"Programming Language :: Python :: 3.10\",\n    \"Programming Language :: Python :: 3.11\",\n    \"Programming Language :: Python :: 3.12\"\n]\nrequires-python = \">=3.6\"\n\n[project.scripts]\npcpp = \"pcpp:main\"\n\n[tool.setuptools]\npackages = [\"pcpp\", \"pcpp.ply.ply\"]\n\n[tool.setuptools.package-data]\npcpp = [\"../LICENSE.txt\"]\n\n[tool.pytest.ini_options]\ntestpaths = [\"tests\"]\npython_files = [\"test_*.py\", \"*_test.py\", \"tests/*.py\"]\naddopts = [\"-v\"]\n"
  },
  {
    "path": "requirements.txt",
    "content": "setuptools\nwheel\npytest\n"
  },
  {
    "path": "setup.py",
    "content": "#!/usr/bin/env python\n\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\nimport os\nimport re\nimport sys\n\n\nclass PyTest(TestCommand):\n    \"\"\"Custom test command that uses pytest\"\"\"\n    user_options = [('pytest-args=', 'a', \"Arguments to pass to pytest\")]\n\n    def initialize_options(self):\n        TestCommand.initialize_options(self)\n        self.pytest_args = []\n\n    def run_tests(self):\n        import subprocess\n        import sys\n        \n        # Install pytest if not available\n        try:\n            import pytest\n        except ImportError:\n            print(\"Installing pytest...\")\n            subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'pytest'])\n            import pytest\n            \n        # Run pytest\n        errno = pytest.main(['tests/'] + self.pytest_args)\n        sys.exit(errno)\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(os.path.join(here, 'README.rst')) as f:\n    long_description = f.read()\n\n# Read version from pcpp/pcmd.py without importing it\nwith open(os.path.join(here, 'pcpp', 'pcmd.py')) as f:\n    content = f.read()\n    version_match = re.search(r\"^version=['\\\"]([^'\\\"]*)['\\\"]\", content, re.M)\n    if version_match:\n        version = version_match.group(1)\n    else:\n        raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n    name='pcpp',\n    version=version,\n    description='A C99 preprocessor written in pure Python',\n    long_description=long_description,\n    author='Niall Douglas and David Beazley',\n    url='https://github.com/ned14/pcpp',\n    packages=['pcpp', 'pcpp.ply.ply'],\n    package_data={'pcpp' : ['../LICENSE.txt']},\n    entry_points={\n        'console_scripts': [ 'pcpp=pcpp:main' ]\n    },\n    options={'bdist_wheel':{'universal':False}},  # Changed to False since we're Python 3 only now\n    license='BSD',\n    
classifiers=[\n        'Development Status :: 5 - Production/Stable',\n        'Intended Audience :: Developers',\n        'Topic :: Software Development :: Build Tools',\n        'Programming Language :: Python :: 3',\n        'Programming Language :: Python :: 3 :: Only',\n        'Programming Language :: Python :: 3.6',\n        'Programming Language :: Python :: 3.7',\n        'Programming Language :: Python :: 3.8',\n        'Programming Language :: Python :: 3.9',\n        'Programming Language :: Python :: 3.10',\n        'Programming Language :: Python :: 3.11',\n        'Programming Language :: Python :: 3.12',\n    ],\n    cmdclass={'test': PyTest},\n)\n"
  },
  {
    "path": "tests/Readme.md",
    "content": "test-c was borrowed from the test suite for the mcpp preprocessor\n\nhttp://mcpp.sourceforge.net/\n"
  },
  {
    "path": "tests/__init__.py",
    "content": ""
  },
  {
    "path": "tests/alternate_input_encodings.py",
    "content": "﻿import unittest, sys, io, os\n\nshouldbe1 = r'''#line 1 \"tests/alternate_input_encodings1_ucs16le.c\"\n语言处理\n'''\n\nshouldbe2 = r'''#line 1 \"tests/alternate_input_encodings1_ucs16le.c\"\n语言处理\n#line 1 \"tests/alternate_input_encodings2_ucs16le.c\"\nいろはにほへとちりぬるを\n'''\n\nclass runner(object):\n    def runTest(self):\n        from pcpp import CmdPreprocessor\n        if self.multiple:\n            p = CmdPreprocessor(['pcpp', '-o', 'tests/alternate_input_encodings.c',\n                                 '--assume-input-encoding', 'utf_16_le',\n                                 '--output-encoding', 'utf_8',\n                                 'tests/alternate_input_encodings1_ucs16le.c',\n                                 'tests/alternate_input_encodings2_ucs16le.c'])\n        else:\n            p = CmdPreprocessor(['pcpp', '-o', 'tests/alternate_input_encodings.c',\n                                 '--assume-input-encoding', 'utf_16_le',\n                                 '--output-encoding', 'utf_8',\n                                 'tests/alternate_input_encodings1_ucs16le.c'])\n        with io.open('tests/alternate_input_encodings.c', 'rt', encoding='utf-8') as ih:\n            output = ih.read()\n        os.remove('tests/alternate_input_encodings.c')\n        if output != self.shouldbe:\n            print(\"Should be:\\n\" + repr(self.shouldbe) + \"EOF\\n\", file = sys.stderr)\n            print(\"\\nWas:\\n\" + repr(output) + \"EOF\\n\", file = sys.stderr)\n        self.assertEqual(p.return_code, 0)\n        self.assertEqual(output, self.shouldbe)\n\nclass single_input_file(unittest.TestCase, runner):\n    multiple = False\n    shouldbe = shouldbe1\n\nclass multiple_input_files(unittest.TestCase, runner):\n    multiple = True\n    shouldbe = shouldbe2\n"
  },
  {
    "path": "tests/cstd.py",
    "content": "\nimport unittest, time\nfrom io import StringIO\nclock = time.process_time\n\nclass runner(object):\n    def runTest(self):\n        from pcpp import Preprocessor\n        import os, sys\n\n        start = clock()\n        p = Preprocessor()\n        p.parse(self.input)\n        oh = StringIO()\n        p.write(oh)\n        end = clock()\n        print(\"Preprocessed test in\", end-start, \"seconds\")\n        if oh.getvalue() != self.output:\n            print(\"Should be:\\n\" + self.output, file = sys.stderr)\n            print(\"\\n\\nWas:\\n\" + oh.getvalue(), file = sys.stderr)\n        self.assertEqual(p.return_code, 0)\n        self.assertEqual(oh.getvalue(), self.output)\n\n            \n# These preprocessor test fragments are borrowed from the C11 standard\n\nclass std1(unittest.TestCase, runner):\n    input = r\"\"\"#define x 3\n#define f(a) f(x * (a))\n#undef x\n#define x 2\n#define g f\n#define z z[0]\n#define h g(~\n#define m(a) a(w)\n#define w 0,1\n#define t(a) a\n#define p() int\n#define q(x) x\n#define r(x,y) x ## y\n#define str(x) # x\nf(y+1) + f(f(z)) % t(t(g)(0) + t)(1);\ng(x+(3,4)-w) | h 5) & m\n(f)^m(m);\np() i[q()] = { q(1), r(2,3), r(4,), r(,5), r(,) };\nchar c[2][6] = { str(hello), str() };\"\"\"\n    output = r\"\"\"#line 15\nf(2 * (y+1)) + f(2 * (f(2 * (z[0])))) % f(2 * (0)) + t(1);\nf(2 * (2+(3,4)-0,1)) | f(2 * (~ 5)) & f(2 * (0,1))^m(0,1);\n\nint i[] = { 1, 23, 4, 5, };\nchar c[2][6] = { \"hello\", \"\" };\n\"\"\"\n\nclass std2(unittest.TestCase, runner):\n    input = r\"\"\"#define TWO_ARGS        a,b\n#define sub( x, y)      (x - y)\n    assert( sub( TWO_ARGS, 1) == 1);\n\"\"\"\n    output = r\"\"\"\n\n    assert( (a,b - 1) == 1);\n\"\"\"\n\nclass std3(unittest.TestCase, runner):\n    input = r\"\"\"#define t(x,y,z) x ## y ## z\nint j[] = { t(1,2,3), t(,4,5), t(6,,7), t(8,9,),\nt(10,,), t(,11,), t(,,12), t(,,) };\n\"\"\"\n    output = r\"\"\"\nint j[] = { 123, 45, 67, 89,\n10, 11, 12, };\n\"\"\"\n\nclass 
std4(unittest.TestCase, runner):\n    input = r\"\"\"#define debug(...) fprintf(stderr, __VA_ARGS__)\n#define showlist(...) puts(#__VA_ARGS__)\n#define report(test, ...) ((test)?puts(#test):\\\nprintf(__VA_ARGS__))\ndebug(\"Flag\");\ndebug(\"X = %d\\n\", x);\nshowlist(The first, second, and third items.);\nreport(x>y, \"x is %d but y is %d\", x, y);\n\"\"\"\n    output = r\"\"\"\n\n\n\nfprintf(stderr, \"Flag\");\nfprintf(stderr, \"X = %d\\n\", x);\nputs(\"The first, second, and third items.\");\n((x>y)?puts(\"x>y\"):printf( \"x is %d but y is %d\", x, y));\n\"\"\"\n\nclass std5(unittest.TestCase, runner):\n    input = r\"\"\"#define Z   Z[0]\n    assert( Z == 1);\n#define AB  BA\n#define BA  AB\n    assert( AB == 1);\n#define f(a)    a + f(a)\n    assert( f( x) == 2);\n#define g(a)    a + h( a)\n#define h(a)    a + g( a)\n    assert( g( x) == 4);\n    assert( f( Z) == 2);\n\"\"\"\n    output = r\"\"\"\n    assert( Z[0] == 1);\n\n\n    assert( AB == 1);\n\n    assert( x + f(x) == 2);\n\n\n    assert( x + x + g( x) == 4);\n    assert( Z[0] + f(Z[0]) == 2);\n\"\"\"\n\n\nclass std6(unittest.TestCase, runner):\n    input = r\"\"\"#define MACRO_0         0\n#define MACRO_1         1\n#define glue( a, b)     a ## b\n    assert( glue( MACRO_0, MACRO_1) == 2);\n\"\"\"\n    output = r\"\"\"\n\n\n    assert( MACRO_0MACRO_1 == 2);\n\"\"\"\n\n\nclass std7(unittest.TestCase, runner):\n    input = r\"\"\"#if     0\n    \"nonsence\"; /*\n#else\n    still in\n    comment     */\n#else\n#define MACRO_abcd  /*\n    in comment\n    */  abcd\n#endif\n    assert( MACRO_abcd == 4);\n\"\"\"\n    output = r\"\"\"#line 11\n    assert( abcd == 4);\n\"\"\"\n\nclass std8(unittest.TestCase, runner):\n    input = r\"\"\"#if 0\nniall\n#elif 0\ndouglas\n#elif 1\nfoo\n#endif\n\"\"\"\n    output = r\"\"\"\n\n\n\n\nfoo\n\"\"\"\n\n\nclass std9(unittest.TestCase, runner):\n    input = r'''#define str(x) # x\nstr(    niall  is      a   /* comment */\n   pretty      boy           )\n'''\n    output = 
r'''\n\"niall is a pretty boy\"\n'''\n\n\nclass std10(unittest.TestCase, runner):\n    input = r\"\"\"#define MACRO_0         0\n#define MACRO_1         1\n#define glue( a, b)     a ## b\n    assert( glue( MACRO_, 1) == 1);\n\"\"\"\n    output = r\"\"\"\n\n\n    assert( 1 == 1);\n\"\"\"\n\nclass std11(unittest.TestCase, runner):\n    input = r\"\"\"#define FUNC( a, b, c)      a + b + c\n        FUNC\n        (\n            a,\n            b,\n            c\n        )\n        == 6\n\"\"\"\n    output = r\"\"\"\n        a + b + c\n\n\n\n\n\n        == 6\n\"\"\"\n\nclass test12(unittest.TestCase, runner):\n    input = r\"\"\"\n#define BOOSTLITE_GLUE(x, y) x y\n\n#define BOOSTLITE_RETURN_ARG_COUNT(_1_, _2_, _3_, _4_, _5_, _6_, _7_, _8_, count, ...) count\n#define BOOSTLITE_EXPAND_ARGS(args) BOOSTLITE_RETURN_ARG_COUNT args\n#define BOOSTLITE_COUNT_ARGS_MAX8(...) BOOSTLITE_EXPAND_ARGS((__VA_ARGS__, 8, 7, 6, 5, 4, 3, 2, 1, 0))\n\n#define BOOSTLITE_OVERLOAD_MACRO2(name, count) name##count\n#define BOOSTLITE_OVERLOAD_MACRO1(name, count) BOOSTLITE_OVERLOAD_MACRO2(name, count)\n#define BOOSTLITE_OVERLOAD_MACRO(name, count) BOOSTLITE_OVERLOAD_MACRO1(name, count)\n\n#define BOOSTLITE_CALL_OVERLOAD(name, ...) BOOSTLITE_GLUE(BOOSTLITE_OVERLOAD_MACRO(name, BOOSTLITE_COUNT_ARGS_MAX8(__VA_ARGS__)), (__VA_ARGS__))\n\n#define BOOSTLITE_GLUE_(x, y) x y\n\n#define BOOSTLITE_RETURN_ARG_COUNT_(_1_, _2_, _3_, _4_, _5_, _6_, _7_, _8_, count, ...) count\n#define BOOSTLITE_EXPAND_ARGS_(args) BOOSTLITE_RETURN_ARG_COUNT_ args\n#define BOOSTLITE_COUNT_ARGS_MAX8_(...) BOOSTLITE_EXPAND_ARGS_((__VA_ARGS__, 8, 7, 6, 5, 4, 3, 2, 1, 0))\n\n#define BOOSTLITE_OVERLOAD_MACRO2_(name, count) name##count\n#define BOOSTLITE_OVERLOAD_MACRO1_(name, count) BOOSTLITE_OVERLOAD_MACRO2_(name, count)\n#define BOOSTLITE_OVERLOAD_MACRO_(name, count) BOOSTLITE_OVERLOAD_MACRO1_(name, count)\n\n#define BOOSTLITE_CALL_OVERLOAD_(name, ...) 
BOOSTLITE_GLUE_(BOOSTLITE_OVERLOAD_MACRO_(name, BOOSTLITE_COUNT_ARGS_MAX8_(__VA_ARGS__)), (__VA_ARGS__))\n\n#define BOOSTLITE_BIND_STRINGIZE(a) #a\n#define BOOSTLITE_BIND_STRINGIZE2(a) BOOSTLITE_BIND_STRINGIZE(a)\n#define BOOSTLITE_BIND_NAMESPACE_VERSION8(a, b, c, d, e, f, g, h) a##_##b##_##c##_##d##_##e##_##f##_##g##_##h\n#define BOOSTLITE_BIND_NAMESPACE_VERSION7(a, b, c, d, e, f, g) a##_##b##_##c##_##d##_##e##_##f##_##g\n#define BOOSTLITE_BIND_NAMESPACE_VERSION6(a, b, c, d, e, f) a##_##b##_##c##_##d##_##e##_##f\n#define BOOSTLITE_BIND_NAMESPACE_VERSION5(a, b, c, d, e) a##_##b##_##c##_##d##_##e\n#define BOOSTLITE_BIND_NAMESPACE_VERSION4(a, b, c, d) a##_##b##_##c##_##d\n#define BOOSTLITE_BIND_NAMESPACE_VERSION3(a, b, c) a##_##b##_##c\n#define BOOSTLITE_BIND_NAMESPACE_VERSION2(a, b) a##_##b\n#define BOOSTLITE_BIND_NAMESPACE_VERSION1(a) a\n#define BOOSTLITE_BIND_NAMESPACE_VERSION(...) BOOSTLITE_CALL_OVERLOAD(BOOSTLITE_BIND_NAMESPACE_VERSION, __VA_ARGS__)\n\n#define BOOSTLITE_BIND_NAMESPACE_BEGIN_NAMESPACE_SELECT2(name, modifier) modifier namespace name {\n#define BOOSTLITE_BIND_NAMESPACE_BEGIN_NAMESPACE_SELECT1(name) namespace name {\n#define BOOSTLITE_BIND_NAMESPACE_BEGIN_NAMESPACE_SELECT(...) 
BOOSTLITE_CALL_OVERLOAD_(BOOSTLITE_BIND_NAMESPACE_BEGIN_NAMESPACE_SELECT, __VA_ARGS__)\n#define BOOSTLITE_BIND_NAMESPACE_BEGIN_EXPAND8(a, b, c, d, e, f, g, h) BOOSTLITE_BIND_NAMESPACE_BEGIN_NAMESPACE_SELECT a BOOSTLITE_BIND_NAMESPACE_BEGIN_EXPAND7(b, c, d, e, f, g, h)\n#define BOOSTLITE_BIND_NAMESPACE_BEGIN_EXPAND7(a, b, c, d, e, f, g) BOOSTLITE_BIND_NAMESPACE_BEGIN_NAMESPACE_SELECT a BOOSTLITE_BIND_NAMESPACE_BEGIN_EXPAND6(b, c, d, e, f, g)\n#define BOOSTLITE_BIND_NAMESPACE_BEGIN_EXPAND6(a, b, c, d, e, f) BOOSTLITE_BIND_NAMESPACE_BEGIN_NAMESPACE_SELECT a BOOSTLITE_BIND_NAMESPACE_BEGIN_EXPAND5(b, c, d, e, f)\n#define BOOSTLITE_BIND_NAMESPACE_BEGIN_EXPAND5(a, b, c, d, e) BOOSTLITE_BIND_NAMESPACE_BEGIN_NAMESPACE_SELECT a BOOSTLITE_BIND_NAMESPACE_BEGIN_EXPAND4(b, c, d, e)\n#define BOOSTLITE_BIND_NAMESPACE_BEGIN_EXPAND4(a, b, c, d) BOOSTLITE_BIND_NAMESPACE_BEGIN_NAMESPACE_SELECT a BOOSTLITE_BIND_NAMESPACE_BEGIN_EXPAND3(b, c, d)\n#define BOOSTLITE_BIND_NAMESPACE_BEGIN_EXPAND3(a, b, c) BOOSTLITE_BIND_NAMESPACE_BEGIN_NAMESPACE_SELECT a BOOSTLITE_BIND_NAMESPACE_BEGIN_EXPAND2(b, c)\n#define BOOSTLITE_BIND_NAMESPACE_BEGIN_EXPAND2(a, b) BOOSTLITE_BIND_NAMESPACE_BEGIN_NAMESPACE_SELECT a BOOSTLITE_BIND_NAMESPACE_BEGIN_EXPAND1(b)\n#define BOOSTLITE_BIND_NAMESPACE_BEGIN_EXPAND1(a) BOOSTLITE_BIND_NAMESPACE_BEGIN_NAMESPACE_SELECT a\n#define BOOSTLITE_BIND_NAMESPACE_BEGIN(...) 
BOOSTLITE_CALL_OVERLOAD(BOOSTLITE_BIND_NAMESPACE_BEGIN_EXPAND, __VA_ARGS__)\n\n#define BOOST_OUTCOME_VERSION_GLUE2(a, b, c) a##b##c\n#define BOOST_OUTCOME_VERSION_GLUE(a, b, c) BOOST_OUTCOME_VERSION_GLUE2(a, b, c)\n\n#define BOOST_OUTCOME_VERSION_MAJOR 1\n#define BOOST_OUTCOME_VERSION_MINOR 0\n#define BOOST_OUTCOME_VERSION_PATCH 0\n#define BOOST_OUTCOME_VERSION_REVISION 0\n#define BOOST_OUTCOME_NAMESPACE_VERSION BOOST_OUTCOME_VERSION_GLUE(BOOST_OUTCOME_VERSION_MAJOR, _, BOOST_OUTCOME_VERSION_MINOR)\n\n#define BOOST_OUTCOME_PREVIOUS_COMMIT_UNIQUE 01320023\n\n#define BOOST_OUTCOME_V1_STL11_IMPL std\n#define BOOST_OUTCOME_V1_ERROR_CODE_IMPL std\n#define BOOST_OUTCOME_V1 (boost), (outcome), (BOOSTLITE_BIND_NAMESPACE_VERSION(, BOOST_OUTCOME_NAMESPACE_VERSION, BOOST_OUTCOME_V1_STL11_IMPL, BOOST_OUTCOME_V1_ERROR_CODE_IMPL, BOOST_OUTCOME_PREVIOUS_COMMIT_UNIQUE), inline)\n#define BOOST_OUTCOME_V1_NAMESPACE_BEGIN BOOSTLITE_BIND_NAMESPACE_BEGIN(BOOST_OUTCOME_V1)\n\nBOOSTLITE_BIND_NAMESPACE_VERSION(, BOOST_OUTCOME_NAMESPACE_VERSION, BOOST_OUTCOME_V1_STL11_IMPL, BOOST_OUTCOME_V1_ERROR_CODE_IMPL, BOOST_OUTCOME_PREVIOUS_COMMIT_UNIQUE)\nBOOST_OUTCOME_V1_NAMESPACE_BEGIN\n\"\"\"\n    output = r\"\"\"#line 67\n_1_0_std_std_01320023\nnamespace boost { namespace outcome { inline namespace _1_0_std_std_01320023 {\n\"\"\"\n\nclass test13(unittest.TestCase, runner):\n    input = r\"\"\"\n#define _CRT_INTERNAL_NONSTDC_NAMES                                            \\\n    (                                                                          \\\n        ( defined _CRT_DECLARE_NONSTDC_NAMES && _CRT_DECLARE_NONSTDC_NAMES) || \\\n        (!defined _CRT_DECLARE_NONSTDC_NAMES && !__STDC__                 )    \\\n    )\n#if _CRT_INTERNAL_NONSTDC_NAMES\nfoo\n#endif\n\"\"\"\n    output = r\"\"\"#line 8\nfoo\n\"\"\"\n\nclass test14(unittest.TestCase, runner):\n    input = r\"\"\"\n# if defined __GNUC__ // NOTE: GNUC is also defined for Clang\n#   if (__GNUC__ == 4) && (__GNUC_MINOR__ >= 
8)\n#     define TR2_OPTIONAL_GCC_4_8_AND_HIGHER___\n#   elif (__GNUC__ > 4)\n#     define TR2_OPTIONAL_GCC_4_8_AND_HIGHER___\n#   endif\n# \n#   if (__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)\n#     define TR2_OPTIONAL_GCC_4_7_AND_HIGHER___\n#   elif (__GNUC__ > 4)\n#     define TR2_OPTIONAL_GCC_4_7_AND_HIGHER___\n#   endif\n#\n#   if (__GNUC__ == 4) && (__GNUC_MINOR__ == 8) && (__GNUC_PATCHLEVEL__ >= 1)\n#     define TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___\n#   elif (__GNUC__ == 4) && (__GNUC_MINOR__ >= 9)\n#     define TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___\n#   elif (__GNUC__ > 4)\n#     define TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___\n#   endif\n# endif\nfoo\n\"\"\"\n    output = r\"\"\"#line 23\nfoo\n\"\"\"\n\nclass test15(unittest.TestCase, runner):\n    input = r\"\"\"#define f(type) type type##_base\nf(g)\n\"\"\"\n    output = r\"\"\"\ng g_base\n\"\"\"\n\nclass test16(unittest.TestCase, runner):\n    # #if ((1?2:3) == 2) is known to fail\n    input = r\"\"\"#if (((1)?2:3) == 2)\nhi\n#endif\n\"\"\"\n    output = r\"\"\"\nhi\n\"\"\"\n\nclass test17(unittest.TestCase, runner):\n    input = r\"\"\"#if L'\\0' == 0\nhi\n#endif\n\"\"\"\n    output = r\"\"\"\nhi\n\"\"\"\n\nclass test18(unittest.TestCase, runner):\n    input = r\"\"\"\n/*\nmultiline\ncomment\n*/\n\nvoid shouldBeOnLineSeven();\n\"\"\"\n    output = r\"\"\"\n\n\n\n\n\nvoid shouldBeOnLineSeven();\n\"\"\"\n\n\nclass test19(unittest.TestCase, runner):\n    input = r\"\"\"\n/*\na\ncomment\nthat\nspans\neight\nlines\n*/\n\nvoid shouldBeOnLineEleven();\"\"\"\n    output = r\"\"\"#line 11\nvoid shouldBeOnLineEleven();\n\"\"\"\n\nclass test20(unittest.TestCase, runner):\n    input = r\"\"\"\n#define PASTE(x, y) x ## y\n#if PASTE(1, 2) == 12\n  works\n#else\n  fails\n#endif\n\"\"\"\n    output = r\"\"\"\n\n\n  works\n\"\"\"\n\nclass test21(unittest.TestCase, runner):\n    input = r\"\"\"\n#define PASTE(x, y, z) x ## y ## z\n#if PASTE(1, 2, L) == 12\n  works\n#else\n  fails\n#endif\n\"\"\"\n    output = r\"\"\"\n\n\n  
works\n\"\"\"\n\nclass test22(unittest.TestCase, runner):\n    input = r\"\"\"\n#define OUTCOME_TRY_GLUE2(x, y) x##y\n#define OUTCOME_TRY_GLUE(x, y) OUTCOME_TRY_GLUE2(x, y)\n#define OUTCOME_TRY_UNIQUE_NAME OUTCOME_TRY_GLUE(_outcome_try_unique_name_temporary, __LINE__)\n\nOUTCOME_TRY_UNIQUE_NAME\nOUTCOME_TRY_UNIQUE_NAME\nOUTCOME_TRY_UNIQUE_NAME\n\"\"\"\n    output = r\"\"\"\n\n\n\n\n_outcome_try_unique_name_temporary6\n_outcome_try_unique_name_temporary7\n_outcome_try_unique_name_temporary8\n\"\"\"\n\nclass test23(unittest.TestCase, runner):\n    input = r\"\"\"\n#define FUNC1(rettype) rettype\n#define FUNC2 void\n\nFUNC1(void)foo()\n{\n}\n\nFUNC2|foo()\n{\n}\n\nFUNC1(void)/foo()\n{\n}\n\nFUNC1(void)FUNC2.FUNC1(void)foo()\n{\n}\"\"\"\n    output = r\"\"\"\n\n\n\nvoid foo()\n{\n}\n\nvoid|foo()\n{\n}\n\nvoid/foo()\n{\n}\n\nvoid void.void foo()\n{\n}\n\"\"\"\n\nclass test24(unittest.TestCase, runner):\n    input = r\"\"\"\nconst char *foo = \"Hello\"\\\n\"Niall\"\\\n;\"\"\"\n    output = r\"\"\"\nconst char *foo = \"Hello\"\"Niall\";\n\"\"\"\n\nclass test25(unittest.TestCase, runner):\n    input = r\"\"\"\nconst char *foo = \"Niall \\ \nsays\\\nhello\"\\;\"\"\"\n    output = r\"\"\"\nconst char *foo = \"Niall sayshello\"\\;\n\"\"\"\n\nclass test26(unittest.TestCase, runner):\n    input = r\"\"\"\n#if BOO(FOO)\nfail\n#else\nsuccess\n#endif\n\"\"\"\n    output = r\"\"\"\n\n\n\nsuccess\n\"\"\"\n\nclass test27(unittest.TestCase, runner):\n    input = r\"\"\"\n#define BOOST_WORKAROUND(symbol, test)                \\\n       ((symbol ## _WORKAROUND_GUARD + 0 == 0) &&     \\\n       (symbol != 0) && (1 % (( (symbol test) ) + 1)))\n\n#define BOOST_MSVC 1916\n#define BOOST_MSVC_WORKAROUND_GUARD 0\nBOOST_WORKAROUND(BOOST_MSVC,==1916)\n\"\"\"\n    output = r\"\"\"#line 8\n((0 + 0 == 0) && (1916 != 0) && (1 % (( (1916 ==1916) ) + 1)))\n\"\"\"\n\n\nclass test28(unittest.TestCase, runner):\n    input = r\"\"\"\n#define TEST \\\n1 + 2   \\\n3 + 4\nTEST\n\"\"\"\n    output = 
r\"\"\"\n\n\n\n1 + 2   3 + 4\n\"\"\"\n\nclass test29(unittest.TestCase, runner):\n    input = r\"\"\"\n#define fCAST4_8s(A) ((int64_t)((int32_t)(A)))\n#define fBIDIR_SHIFTR(SRC, SHAMT, REGSTYPE)    (((SHAMT) < 0) ? ((fCAST##REGSTYPE(SRC) << ((-(SHAMT)) - 1)) << 1)                   : (fCAST##REGSTYPE(SRC) >> (SHAMT)))\n#define fBIDIR_ASHIFTR(SRC, SHAMT, REGSTYPE)    fBIDIR_SHIFTR(SRC, SHAMT, REGSTYPE##s)\n#define fSXTN(N, M, VAL) (((N) != 0) ? sextract64((VAL), 0, (N)) : 0LL)\n#define fHIDE(A) A\n#define fECHO(A) (A)\n#define DEF_SHORTCODE(TAG,SHORTCODE)    insn(TAG, SHORTCODE)\n\nDEF_SHORTCODE(S2_asr_r_r_acc, { fHIDE(size4s_t) shamt=fSXTN(7,32,RtV); RxV = fECHO(RxV + fBIDIR_ASHIFTR(RsV,shamt,4_8)); })\n\"\"\"\n    output = r\"\"\"#line 10\ninsn(S2_asr_r_r_acc, { size4s_t shamt=(((7) != 0) ? sextract64((RtV), 0, (7)) : 0LL); RxV = (RxV + (((shamt) < 0) ? ((((int64_t)((int32_t)(RsV))) << ((-(shamt)) - 1)) << 1)                   : (((int64_t)((int32_t)(RsV))) >> (shamt)))); })\n\"\"\"\n\nclass test30(unittest.TestCase, runner):\n    input = r\"\"\"\n#define FOO(x) x\n#define BAR FOO(BAR)\nBAR\n\"\"\"\n    output = r\"\"\"\n\n\nBAR\n\"\"\"\n\nclass test31(unittest.TestCase, runner):\n    input = r\"\"\"\n#define PCRE2_SIZE            size_t\n\n#define PCRE2_COMPILE_FUNCTIONS \\\npcre2_code *pcre2_compile(PCRE2_SPTR, PCRE2_SIZE, uint32_t, int *, PCRE2_SIZE *, \\\n    pcre2_compile_context *);\n\n#define PCRE2_JOIN(a,b) a ## b\n#define PCRE2_GLUE(a,b) PCRE2_JOIN(a,b)\n#define PCRE2_SUFFIX(a) PCRE2_GLUE(a,PCRE2_LOCAL_WIDTH)\n\n#define PCRE2_SPTR                  PCRE2_SUFFIX(PCRE2_SPTR)\n#define pcre2_code                  PCRE2_SUFFIX(pcre2_code_)\n#define pcre2_compile_context          PCRE2_SUFFIX(pcre2_compile_context_)\n#define pcre2_compile                         PCRE2_SUFFIX(pcre2_compile_)\n\n#define PCRE2_TYPES_STRUCTURES_AND_FUNCTIONS \\\nPCRE2_COMPILE_FUNCTIONS\n\n#define PCRE2_LOCAL_WIDTH 8\nPCRE2_TYPES_STRUCTURES_AND_FUNCTIONS\n#undef 
PCRE2_LOCAL_WIDTH\n\n#define PCRE2_LOCAL_WIDTH 16\nPCRE2_TYPES_STRUCTURES_AND_FUNCTIONS\n#undef PCRE2_LOCAL_WIDTH\n\n#define PCRE2_LOCAL_WIDTH 32\nPCRE2_TYPES_STRUCTURES_AND_FUNCTIONS\n#undef PCRE2_LOCAL_WIDTH\n\"\"\"\n    output = r\"\"\"#line 21\npcre2_code_8 *pcre2_compile_8(PCRE2_SPTR8, size_t, uint32_t, int *, size_t *, pcre2_compile_context_8 *);\n\n\n\npcre2_code_16 *pcre2_compile_16(PCRE2_SPTR16, size_t, uint32_t, int *, size_t *, pcre2_compile_context_16 *);\n\n\n\npcre2_code_32 *pcre2_compile_32(PCRE2_SPTR32, size_t, uint32_t, int *, size_t *, pcre2_compile_context_32 *);\n\"\"\"\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "tests/doctests.py",
    "content": "import unittest\n\nclass pcpp_doctests(unittest.TestCase):\n    def runTest(self):\n        import doctest, pcpp.preprocessor, pcpp.evaluator\n        failurecount, testcount = doctest.testmod(pcpp.evaluator)\n        self.assertGreater(testcount, 0)\n        self.assertEqual(failurecount, 0)\n        failurecount, testcount = doctest.testmod(pcpp.preprocessor)\n        #self.assertGreater(testcount, 0)\n        self.assertEqual(failurecount, 0)\n\n"
  },
  {
    "path": "tests/embedded.py",
    "content": "\nimport unittest, sys\nfrom io import StringIO\n\nclass embedded1(unittest.TestCase):\n    def runTest(self):\n        from pcpp import Preprocessor\n        output = r'''\n\na\n'''\n\n        p = Preprocessor()\n        p.define('BAR FOO')\n        p.parse(r'''#define FOO 1\n#if FOO == BAR\na\n#endif\n''')\n        oh = StringIO()\n        p.write(oh)\n        if oh.getvalue() != output:\n            print(\"Should be:\\n\" + output, file = sys.stderr)\n            print(\"\\n\\nWas:\\n\" + oh.getvalue(), file = sys.stderr)\n        self.assertEqual(p.return_code, 0)\n        self.assertEqual(oh.getvalue(), output)\n        \n"
  },
  {
    "path": "tests/eval.py",
    "content": "import unittest, time\nfrom io import StringIO\nclock = time.process_time\n\nclass runner(object):\n    def runTest(self):\n        from pcpp import Preprocessor\n        import os, sys\n\n        start = clock()\n        #p = Preprocessor()\n        #lines = p.group_lines(self.input, '')\n        #for x in lines:\n        #    print(x)\n        p = Preprocessor()\n        p.parse(self.input)\n        oh = StringIO()\n        p.write(oh)\n        end = clock()\n        print(\"Preprocessed test in\", end-start, \"seconds\")\n        if oh.getvalue() != self.output:\n            print(\"Should be:\\n\" + self.output, file = sys.stderr)\n            print(\"\\n\\nWas:\\n\" + oh.getvalue(), file = sys.stderr)\n        self.assertEqual(p.return_code, 0)\n        self.assertEqual(oh.getvalue(), self.output)\n\n\nclass eval1(unittest.TestCase, runner):\n    input = r\"\"\"#if -1 >= 0U\ncorrect\n#endif\"\"\"\n    output = r\"\"\"\ncorrect\n\"\"\"\n\nclass eval2(unittest.TestCase, runner):\n    input = r\"\"\"#if 1<<2 == 4\ncorrect\n#endif\"\"\"\n    output = r\"\"\"\ncorrect\n\"\"\"\n\nclass eval3(unittest.TestCase, runner):\n    input = r\"\"\"#if (-!+!9) == -1\ncorrect\n#endif\"\"\"\n    output = r\"\"\"\ncorrect\n\"\"\"\n\nclass eval4(unittest.TestCase, runner):\n    input = r\"\"\"#if (2 || 3) == 1\ncorrect\n#endif\"\"\"\n    output = r\"\"\"\ncorrect\n\"\"\"\n"
  },
  {
    "path": "tests/issue0017/inc.h",
    "content": "inc1\n#ifndef __inc_h__\ninc2\n#define __inc_h__\ninc3\n#endif //__inc_h__\ninc4\n"
  },
  {
    "path": "tests/issue0017/issue0017.c",
    "content": "#include \"inc.h\"\ntest1\n#include \"inc.h\"\ntest2\n"
  },
  {
    "path": "tests/issue0017-ref.i",
    "content": "#line 1 \"tests/issue0017/inc.h\"\ninc1\n\ninc2\n\ninc3\n\ninc4\n#line 2 \"tests/issue0017/issue0017.c\"\ntest1\n#line 1 \"tests/issue0017/inc.h\"\ninc1\n\n\n\n\n\ninc4\n#line 4 \"tests/issue0017/issue0017.c\"\ntest2\n"
  },
  {
    "path": "tests/issue0017.py",
    "content": "\nimport unittest, os\n\nclass issue0017(unittest.TestCase):\n    def runTest(self):\n        from pcpp import Preprocessor\n        import os, sys\n\n        p = Preprocessor()\n        path = 'tests/issue0017/issue0017.c'\n        with open(path, 'rt') as ih:\n            p.parse(ih.read(), path)\n        with open('tests/issue0017.i', 'w') as oh:\n            p.write(oh)\n        with open('tests/issue0017.i', 'r') as ih:\n            was = ih.read()\n        os.remove('tests/issue0017.i')\n        with open('tests/issue0017-ref.i', 'r') as ih:\n            shouldbe = ih.read()\n        if was != shouldbe:\n            print(\"Should be:\\n\" + shouldbe, file = sys.stderr)\n            print(\"\\n\\nWas:\\n\" + was, file = sys.stderr)\n        self.assertEqual(p.return_code, 0)\n        self.assertEqual(was, shouldbe)\n"
  },
  {
    "path": "tests/issue0025/inc.h",
    "content": "OUTCOME_TRY_UNIQUE_NAME __LINE__\n\nOUTCOME_TRY_UNIQUE_NAME __LINE__\n"
  },
  {
    "path": "tests/issue0025/main.c",
    "content": "#define OUTCOME_TRY_GLUE2(x, y) x##y\n#define OUTCOME_TRY_GLUE(x, y) OUTCOME_TRY_GLUE2(x, y)\n#define OUTCOME_TRY_UNIQUE_NAME OUTCOME_TRY_GLUE(_outcome_try_unique_name_temporary, __LINE__)\n\nOUTCOME_TRY_UNIQUE_NAME __LINE__\nOUTCOME_TRY_UNIQUE_NAME __LINE__\nOUTCOME_TRY_UNIQUE_NAME __LINE__\n\n#include \"inc.h\"\nOUTCOME_TRY_UNIQUE_NAME __LINE__\n#include \"inc.h\"\nOUTCOME_TRY_UNIQUE_NAME __LINE__\n"
  },
  {
    "path": "tests/issue0025-ref.i",
    "content": "#line 5 \"tests/issue0025/main.c\"\n_outcome_try_unique_name_temporary5 5\n_outcome_try_unique_name_temporary6 6\n_outcome_try_unique_name_temporary7 7\n#line 1 \"tests/issue0025/inc.h\"\n_outcome_try_unique_name_temporary1 1\n\n_outcome_try_unique_name_temporary3 3\n#line 10 \"tests/issue0025/main.c\"\n_outcome_try_unique_name_temporary10 10\n#line 1 \"tests/issue0025/inc.h\"\n_outcome_try_unique_name_temporary1 1\n\n_outcome_try_unique_name_temporary3 3\n#line 12 \"tests/issue0025/main.c\"\n_outcome_try_unique_name_temporary12 12\n"
  },
  {
    "path": "tests/issue0025.py",
    "content": "\nimport unittest, os\n\nclass issue0025(unittest.TestCase):\n    def runTest(self):\n        from pcpp import Preprocessor\n        import os, sys\n\n        p = Preprocessor()\n        path = 'tests/issue0025/main.c'\n        with open(path, 'rt') as ih:\n            p.parse(ih.read(), path)\n        with open('tests/issue0025.i', 'w') as oh:\n            p.write(oh)\n        with open('tests/issue0025.i', 'r') as ih:\n            was = ih.read()\n        os.remove('tests/issue0025.i')\n        with open('tests/issue0025-ref.i', 'r') as ih:\n            shouldbe = ih.read()\n        if was != shouldbe:\n            print(\"Should be:\\n\" + shouldbe, file = sys.stderr)\n            print(\"\\n\\nWas:\\n\" + was, file = sys.stderr)\n        self.assertEqual(p.return_code, 0)\n        self.assertEqual(was, shouldbe)\n"
  },
  {
    "path": "tests/issue0027.py",
    "content": "\nimport unittest, time\nfrom io import StringIO\nclock = time.process_time\n\nclass runner(object):\n    def runTest(self):\n        from pcpp import Preprocessor, OutputDirective, Action\n        import os, sys\n\n        class PassThruPreprocessor(Preprocessor):\n            def on_directive_handle(self,directive,toks,ifpassthru,precedingtoks):\n                if len(precedingtoks) == 1:\n                    # Execute\n                    return super(PassThruPreprocessor, self).on_directive_handle(directive,toks,ifpassthru,precedingtoks)\n                raise OutputDirective(Action.IgnoreAndPassThrough)\n\n        start = clock()\n        p = PassThruPreprocessor()\n        p.parse(self.input)\n        oh = StringIO()\n        p.write(oh)\n        end = clock()\n        print(\"Preprocessed test in\", end-start, \"seconds\")\n        if oh.getvalue() != self.output:\n            print(\"Should be:\\n\" + self.output + \"EOF\\n\", file = sys.stderr)\n            print(\"\\nWas:\\n\" + oh.getvalue()+\"EOF\\n\", file = sys.stderr)\n        self.assertEqual(p.return_code, 0)\n        self.assertEqual(oh.getvalue(), self.output)\n\n            \nclass space_after_hash(unittest.TestCase, runner):\n    input = r\"\"\"#if 5\n1\n#endif\n# if 5\n2\n# endif\n#warning Hi\n# warning Hi2\"\"\"\n    output = r\"\"\"\n1\n\n# if 5\n2\n# endif\n\n# warning Hi2\n\"\"\"\n"
  },
  {
    "path": "tests/issue0030/source1.c",
    "content": "#undef FOO\n#define FOO 1\n"
  },
  {
    "path": "tests/issue0030/source2.c",
    "content": "#undef FOO\n#define FOO 2\n"
  },
  {
    "path": "tests/issue0030/source3.c",
    "content": "FOO\n"
  },
  {
    "path": "tests/issue0030.py",
    "content": "\nimport unittest, sys, os\n\nshouldbe = r'''#line 1 \"tests/issue0030/source3.c\"\n2\n'''\n\nclass runner(object):\n    def runTest(self):\n        from pcpp import CmdPreprocessor\n        p = CmdPreprocessor(['pcpp', '-o', 'tests/issue0030.c',\n                             'tests/issue0030/source1.c',\n                             'tests/issue0030/source2.c',\n                             'tests/issue0030/source3.c'])\n        with open('tests/issue0030.c', 'rt') as ih:\n            output = ih.read()\n        os.remove('tests/issue0030.c')\n        if output != shouldbe:\n            print(\"Should be:\\n\" + shouldbe + \"EOF\\n\", file = sys.stderr)\n            print(\"\\nWas:\\n\" + output + \"EOF\\n\", file = sys.stderr)\n        self.assertEqual(p.return_code, 0)\n        self.assertEqual(output, shouldbe)\n\nclass multiple_input_files(unittest.TestCase, runner):\n    pass\n"
  },
  {
    "path": "tests/issue0032.py",
    "content": "\nimport unittest, sys\n\nshouldbe = r'''#line 1 \"tests/issue0030/source3.c\"\n2\n'''\n\nclass runner(object):\n    def runTest(self):\n        from pcpp import CmdPreprocessor\n        p = CmdPreprocessor(['pcpp', '--time',\n                             'tests/issue0030/source1.c'])\n        self.assertEqual(p.return_code, 0)\n\nclass no_output_file(unittest.TestCase, runner):\n    pass\n"
  },
  {
    "path": "tests/issue0037/inc.h",
    "content": "    /** this spans\n        two lines */\n    virtual std::string baseOnly();"
  },
  {
    "path": "tests/issue0037.py",
    "content": "\nimport unittest, sys, os\n\nshouldbe = r'''#line 1 \"tests/issue0037/inc.h\"\n    /** this spans\n        two lines */\n    virtual std::string baseOnly();\n'''\n\nclass runner(object):\n    def runTest(self):\n        from pcpp import CmdPreprocessor\n        p = CmdPreprocessor(['pcpp', '--time', '--passthru-comments',\n                             '-o', 'tests/issue0037.i',\n                             'tests/issue0037/inc.h'])\n        with open('tests/issue0037.i', 'rt') as ih:\n            output = ih.read()\n        os.remove('tests/issue0037.i')\n        if output != shouldbe:\n            print(\"Should be:\\n\" + shouldbe + \"EOF\\n\", file = sys.stderr)\n            print(\"\\nWas:\\n\" + output + \"EOF\\n\", file = sys.stderr)\n        self.assertEqual(p.return_code, 0)\n        self.assertEqual(output, shouldbe)\n\nclass multiline_comments(unittest.TestCase, runner):\n    pass\n"
  },
  {
    "path": "tests/issue0044.h",
    "content": "a1\n#if 0\nsample text\n#endif\nb1\n"
  },
  {
    "path": "tests/issue0044.py",
    "content": "\nimport unittest\nimport sys, os\n\nshouldbe = r'''a1\n\n\n\nb1\n'''\n\n\nclass runner(object):\n    def runTest(self):\n        from pcpp import CmdPreprocessor\n        # failure: p = CmdPreprocessor(['pcpp', '--line-directive', '#line',\n        # p = CmdPreprocessor(['pcpp', '--line-directive', 'nothing',\n        # p = CmdPreprocessor(['pcpp', '--line-directive', 'None',\n        p = CmdPreprocessor(['pcpp', '--line-directive', '',\n                             '-o', 'tests/issue0044.i',\n                             'tests/issue0044.h'])\n        with open('tests/issue0044.i', 'rt') as ih:\n            output = ih.read()\n        os.remove('tests/issue0044.i')\n        if output != shouldbe:\n            print(\"Should be:\\n\" + shouldbe + \"EOF\\n\", file=sys.stderr)\n            print(\"\\nWas:\\n\" + output + \"EOF\\n\", file=sys.stderr)\n        self.assertEqual(p.return_code, 0)\n        self.assertEqual(output, shouldbe)\n\n\nclass empty_line_directive(unittest.TestCase, runner):\n    pass\n"
  },
  {
    "path": "tests/issue0051.c",
    "content": "#include \"issue0051.h\"\n\n#ifdef FOO\n  TRUE\n#else\n  FALSE\n#endif\n"
  },
  {
    "path": "tests/issue0051.h",
    "content": "#define FOO 1\n\nvoid my_func1();\nvoid my_func2();\nvoid my_func3();\n"
  },
  {
    "path": "tests/issue0051.py",
    "content": "\nimport unittest\nimport sys, os\n\nclass runner(object):\n    def runTest(self):\n        from pcpp import CmdPreprocessor\n        p = CmdPreprocessor(['pcpp'] + self.options + [\n                             '-o', 'tests/issue0051.i',\n                             'tests/issue0051.c'])\n        with open('tests/issue0051.i', 'rt') as ih:\n            output = ih.read()\n        os.remove('tests/issue0051.i')\n        if output != self.shouldbe:\n            print(\"Should be:\\n\" + self.shouldbe + \"EOF\\n\", file=sys.stderr)\n            print(\"\\nWas:\\n\" + output + \"EOF\\n\", file=sys.stderr)\n        self.assertEqual(p.return_code, 0)\n        self.assertEqual(output, self.shouldbe)\n\n\nclass normal_inclusion(unittest.TestCase, runner):\n    options = []\n    shouldbe = r'''#line 3 \"tests/issue0051.h\"\nvoid my_func1();\nvoid my_func2();\nvoid my_func3();\n#line 4 \"tests/issue0051.c\"\n  TRUE\n'''\n\nclass exclude_inclusion(unittest.TestCase, runner):\n    options = ['--passthru-includes', '\"issue0051.h\"']\n    shouldbe = r'''#line 1 \"tests/issue0051.c\"\n#include \"issue0051.h\"\n\n\n  TRUE\n'''\n"
  },
  {
    "path": "tests/issue0057.h",
    "content": "headertoken\n"
  },
  {
    "path": "tests/issue0057.py",
    "content": "\nimport unittest, time\nfrom io import StringIO\nclock = time.process_time\n\nclass runner(object):\n    def runTest(self):\n        from pcpp import Preprocessor, OutputDirective, Action\n        import os, sys, re\n\n        class PassThruPreprocessor(Preprocessor):\n            def __init__(self):\n                super(PassThruPreprocessor, self).__init__()\n                self.passthru_includes = re.compile('.*/issue0057.*')\n            def on_include_not_found(self,is_malformed,is_system_include,curdir,includepath):\n                raise OutputDirective(Action.IgnoreAndPassThrough)\n\n        start = clock()\n        p = PassThruPreprocessor()\n        p.parse(self.input)\n        oh = StringIO()\n        p.write(oh)\n        end = clock()\n        print(\"Preprocessed test in\", end-start, \"seconds\")\n        if oh.getvalue() != self.output:\n            print(\"Should be:\\n\" + self.output + \"EOF\\n\", file = sys.stderr)\n            print(\"\\nWas:\\n\" + oh.getvalue()+\"EOF\\n\", file = sys.stderr)\n        self.assertEqual(p.return_code, 0)\n        self.assertEqual(oh.getvalue(), self.output)\n\n            \nclass newline_after_passthru_include1(unittest.TestCase, runner):\n    input = r\"\"\"#include \"tests/issue0057.h\"\nfiletoken1\n#include \"tests/issue0057.h\"\nfiletoken2\n\"\"\"\n    output = r\"\"\"#include \"tests/issue0057.h\"\nfiletoken1\n#include \"tests/issue0057.h\"\nfiletoken2\n\"\"\"\n\nclass newline_after_passthru_include2(unittest.TestCase, runner):\n    input = r\"\"\"#include \"tests/issue0057x.h\"\nfiletoken1\n#include \"tests/issue0057x.h\"\nfiletoken2\n\"\"\"\n    output = r\"\"\"#include \"tests/issue0057x.h\"\nfiletoken1\n#include \"tests/issue0057x.h\"\nfiletoken2\n\"\"\"\n\nclass newline_after_passthru_include3(unittest.TestCase, runner):\n    input = r\"\"\"#include \"unfoundfile\"\nfiletoken1\n#include \"unfoundfile\"\nfiletoken2\n\"\"\"\n    output = r\"\"\"#include 
\"unfoundfile\"\nfiletoken1\n#include \"unfoundfile\"\nfiletoken2\n\"\"\"\n"
  },
  {
    "path": "tests/issue0059.py",
    "content": "\nimport unittest, time\nfrom io import StringIO\nclock = time.process_time\n\nclass runner(object):\n    def runTest(self):\n        from pcpp import Preprocessor, OutputDirective, Action\n        import os, sys, re\n\n        class PassThruPreprocessor(Preprocessor):\n            def __init__(self):\n                super(PassThruPreprocessor, self).__init__()\n                self.passthru_includes = re.compile('.*tests/issue0059[a-z]\\\\.h\"')\n            def on_include_not_found(self,is_malformed,is_system_include,curdir,includepath):\n                raise OutputDirective(Action.IgnoreAndPassThrough)\n\n        start = clock()\n        p = PassThruPreprocessor()\n        p.parse(self.input)\n        oh = StringIO()\n        p.write(oh)\n        end = clock()\n        print(\"Preprocessed test in\", end-start, \"seconds\")\n        if oh.getvalue() != self.output:\n            print(\"Should be:\\n\" + self.output + \"EOF\\n\", file = sys.stderr)\n            print(\"\\nWas:\\n\" + oh.getvalue()+\"EOF\\n\", file = sys.stderr)\n        self.assertEqual(p.return_code, 0)\n        self.assertEqual(oh.getvalue(), self.output)\n\n            \nclass multiple_inclusion(unittest.TestCase, runner):\n    input = r\"\"\"#include \"tests/issue0059a.h\"\nFOO\n#include \"tests/issue0059b.h\"\nFOO\n#include \"tests/issue0059a.h\"\nFOO\n#include \"tests/issue0059b.h\"\nFOO\n#include \"tests/issue0059a.h\"\nFOO\n#include \"tests/issue0059b.h\"\nFOO\n\"\"\"\n    output = r\"\"\"#include \"tests/issue0059a.h\"\n0\n#include \"tests/issue0059b.h\"\nFOO\n#include \"tests/issue0059a.h\"\nFOO\n#include \"tests/issue0059b.h\"\nFOO\n#include \"tests/issue0059a.h\"\nFOO\n#include \"tests/issue0059b.h\"\nFOO\n\"\"\"\n"
  },
  {
    "path": "tests/issue0059a.h",
    "content": "#pragma once\n#define FOO __COUNTER__\n\n"
  },
  {
    "path": "tests/issue0059b.h",
    "content": "#undef FOO\n"
  },
  {
    "path": "tests/issue0063.c",
    "content": "#define x\\\n\\\n\n#include \"issue0063.h\"\n\n#undef x\n#define x\\\n \\\n\n#include \"issue0063.h\"\n"
  },
  {
    "path": "tests/issue0063.h",
    "content": "int f();\n"
  },
  {
    "path": "tests/issue0063.py",
    "content": "\nimport unittest\nimport sys, os\n\nclass runner(object):\n    def runTest(self):\n        from pcpp import CmdPreprocessor\n        p = CmdPreprocessor(['pcpp'] + self.options + [\n                             '-o', 'tests/issue0063.i',\n                             'tests/issue0063.c'])\n        with open('tests/issue0063.i', 'rt') as ih:\n            output = ih.read()\n        os.remove('tests/issue0063.i')\n        if output != self.shouldbe:\n            print(\"Should be:\\n\" + self.shouldbe + \"EOF\\n\", file=sys.stderr)\n            print(\"\\nWas:\\n\" + output + \"EOF\\n\", file=sys.stderr)\n        self.assertEqual(p.return_code, 0)\n        self.assertEqual(output, self.shouldbe)\n\n\nclass include_after_continued_macro1(unittest.TestCase, runner):\n    options = []\n    shouldbe = r'''#line 1 \"tests/issue0063.h\"\nint f();\nint f();\n'''\n\nclass include_after_continued_macro2(unittest.TestCase, runner):\n    options = [ '--passthru-defines' ]\n    shouldbe = r'''#line 1 \"tests/issue0063.c\"\n#define x\n#line 1 \"tests/issue0063.h\"\nint f();\n#line 6 \"tests/issue0063.c\"\n#undef x\n#define x\n#line 1 \"tests/issue0063.h\"\nint f();\n'''\n\nclass include_after_continued_macro3(unittest.TestCase, runner):\n    options = [ '--line-directive=', '--passthru-defines' ]\n    shouldbe = r'''#define x\nint f();\n#undef x\n#define x\nint f();\n'''\n"
  },
  {
    "path": "tests/issue0079.py",
    "content": "import unittest, time\nfrom io import StringIO\n\nclass runner(object):\n    def runTest(self):\n        from pcpp import Preprocessor, OutputDirective, Action\n        import os, sys, re\n\n        p = Preprocessor()\n        p.parse(self.input)\n        oh = StringIO()\n        p.write(oh)\n        if oh.getvalue() != self.output:\n            print(\"Should be:\\n\" + self.output + \"EOF\\n\", file = sys.stderr)\n            print(\"\\nWas:\\n\" + oh.getvalue()+\"EOF\\n\", file = sys.stderr)\n        self.assertEqual(p.return_code, 0)\n        self.assertEqual(oh.getvalue(), self.output)\n\nclass pp_number_pasting(unittest.TestCase, runner):\n    input = r\"\"\"#define CAT_(A,B)A##B\n#define CAT(A,B)CAT_(A,B)\n#define Ox 0x\n#if CAT(Ox,1)\nPP_NUMBER pasted ok\n#else\nPP_NUMBER paste fail\n#endif\n\"\"\"\n    output = r\"\"\"\n\n\n\nPP_NUMBER pasted ok\n\"\"\""
  },
  {
    "path": "tests/issue0098/dir1/header.h",
    "content": "header1\n#include_next \"header.h\"\n"
  },
  {
    "path": "tests/issue0098/dir2/header.h",
    "content": "header2\n#include_next \"header.h\"\n"
  },
  {
    "path": "tests/issue0098/dir3/header.h",
    "content": "header3\n#include_next \"header.h\"\n"
  },
  {
    "path": "tests/issue0098/dir4/header.h",
    "content": "header4\n"
  },
  {
    "path": "tests/issue0098.py",
    "content": "\nimport unittest, sys\nfrom io import StringIO\n\nclass runner(object):\n    def runTest(self):\n        from pcpp import Preprocessor\n        p = Preprocessor()\n        p.include_next_enabled = True\n        \n        # Add the test directories to the search path\n        p.add_path('tests/issue0098/dir1')\n        p.add_path('tests/issue0098/dir2')\n        p.add_path('tests/issue0098/dir3')\n        p.add_path('tests/issue0098/dir4')\n\n        p.parse(self.input)        \n        output = StringIO()\n        p.write(output)\n        if output.getvalue() != self.shouldbe:\n            print(\"Should be:\\n\" + self.shouldbe + \"EOF\\n\", file=sys.stderr)\n            print(\"\\nWas:\\n\" + output.getvalue() + \"EOF\\n\", file=sys.stderr)\n        self.assertEqual(p.return_code, 0)\n        self.assertEqual(output.getvalue(), self.shouldbe)\n\n\nclass include_next_works(unittest.TestCase, runner):\n    input = r'''#include_next \"header.h\"\n'''\n    shouldbe = r'''#line 1 \"tests/issue0098/dir1/header.h\"\nheader1\n#line 1 \"tests/issue0098/dir2/header.h\"\nheader2\n#line 1 \"tests/issue0098/dir3/header.h\"\nheader3\n#line 1 \"tests/issue0098/dir4/header.h\"\nheader4\n'''\n\nclass has_include_works(unittest.TestCase, runner):\n    input = r'''#ifdef __has_include\nifdef\n#endif\n#ifndef __has_include\nifndef\n#endif\n#if defined(__has_include)\ndefined\n#endif\n#if __has_include(\"header.h\")\nheader\n#endif\n'''\n    shouldbe = r'''\nifdef\n\n\n\n\n\ndefined\n\n\nheader\n'''\n\nif __name__ == '__main__':\n    unittest.main()"
  },
  {
    "path": "tests/issue0103.py",
    "content": "\nimport unittest, sys\nfrom io import StringIO\n\nclass runner(object):\n    def runTest(self):\n        from pcpp import Preprocessor\n        p = Preprocessor()\n        p.parse(self.input)        \n        output = StringIO()\n        p.write(output)\n        if output.getvalue() != self.shouldbe:\n            print(\"Should be:\\n\" + self.shouldbe + \"EOF\\n\", file=sys.stderr)\n            print(\"\\nWas:\\n\" + output.getvalue() + \"EOF\\n\", file=sys.stderr)\n        self.assertEqual(p.return_code, 0)\n        self.assertEqual(output.getvalue(), self.shouldbe)\n\n\nclass multiline_char_literals1(unittest.TestCase, runner):\n    input = r'''#if 'N\\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n' == 78\nFOO\n#endif\n'''\n    shouldbe = r'''#line 10\nFOO\n'''\n\nclass multiline_char_literals2(unittest.TestCase, runner):\n    input = r'''#if '\\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\nN' == 78\nFOO\n#endif\n'''\n    shouldbe = r'''#line 10\nFOO\n'''\n\nif __name__ == '__main__':\n    unittest.main()"
  },
  {
    "path": "tests/n_std-clang.i",
    "content": "# 1 \"tests/test-c/n_std.c\"\n# 1 \"<built-in>\" 1\n# 1 \"<built-in>\" 3\n# 312 \"<built-in>\" 3\n# 1 \"<command line>\" 1\n# 1 \"<built-in>\" 2\n# 1 \"tests/test-c/n_std.c\" 2\n# 18 \"tests/test-c/n_std.c\"\n# 1 \"tests/test-c/defs.h\" 1\n# 19 \"tests/test-c/n_std.c\" 2\n# 30 \"tests/test-c/n_std.c\"\nvoid n_1( void);\nvoid n_2( void);\nvoid n_3( void);\nvoid n_4( void);\nvoid n_5( void);\nvoid n_6( void);\nvoid n_7( void);\nvoid n_9( void);\nvoid n_10( void);\nvoid n_11( void);\nvoid n_12( void);\nvoid n_13( void);\nvoid n_13_5( void);\nvoid n_13_7( void);\nvoid n_13_8( void);\nvoid n_13_13( void);\nvoid n_15( void);\nvoid n_18( void);\nvoid n_19( void);\nvoid n_20( void);\nvoid n_21( void);\nvoid n_22( void);\nvoid n_23( void);\nvoid n_24( void);\nvoid n_25( void);\nvoid n_26( void);\nvoid n_27( void);\nvoid n_28( void);\nvoid n_29( void);\nvoid n_30( void);\nvoid n_32( void);\nvoid n_37( void);\n\nint main( void)\n{\n\n    n_2();\n    n_3();\n\n    n_5();\n    n_6();\n    n_7();\n    n_9();\n    n_10();\n    n_11();\n    n_12();\n    n_13();\n    n_13_5();\n    n_13_7();\n    n_13_8();\n    n_13_13();\n    n_15();\n    n_18();\n    n_19();\n    n_20();\n    n_21();\n    n_22();\n    n_23();\n    n_24();\n    n_25();\n    n_26();\n    n_27();\n    n_28();\n    n_29();\n    n_30();\n    n_32();\n    n_37();\n    puts( \"<End of \\\"n_std.c\\\">\");\n    return 0;\n}\n\nchar quasi_trigraph[] = { '?', '?', ' ', '?', '?', '?', ' '\n            , '?', '?', '%', ' ', '?', '?', '^', ' ', '?', '#', '\\0' };\n\nvoid n_2( void)\n\n{\n    int ab = 1, cd = 2, ef = 3, abcde = 5;\n\n\n\n\n\n    assert( ab + cd + ef == 6);\n# 122 \"tests/test-c/n_std.c\"\n    assert (ab + cd + ef == 6);\n\n\n    assert (strcmp( \"abcde\", \"abcde\") == 0);\n\n\n\n    assert( abcde == 5);\n\n\n}\n\nvoid n_3( void)\n\n{\n    int abcd = 4;\n\n\n    assert( strcmp( \"abc de\", \"abc de\") == 0);\n# 156 \"tests/test-c/n_std.c\"\n    assert( abcd == 4);\n}\n\nvoid n_5( 
void)\n\n\n\n{\n    int abcde = 5;\n\n\n    assert( abcde == 5);\n}\n# 178 \"tests/test-c/n_std.c\"\nvoid n_6( void)\n\n{\n    int abc = 3;\n\n    assert( isalpha( 'a'));\n\n\n\n\n# 1 \"tests/test-c/header.h\" 1\n# 188 \"tests/test-c/n_std.c\" 2\n assert( abc == 3);\n\n\n\n\n# 1 \"tests/test-c/header.h\" 1\n# 193 \"tests/test-c/n_std.c\" 2\n assert( abc == 3);\n}\n\nvoid n_7( void)\n\n{\n# 1234 \"cpp\"\n assert( 1234 == 1234);\n    assert( strcmp( \"cpp\", \"cpp\") == 0);\n# 2345 \"cpp\"\n assert( 2345 == 2345);\n    assert( strcmp( \"cpp\", \"cpp\") == 0);\n# 1234 \"n_7.c\"\n assert( 1234 == 1234);\n    assert( strcmp( \"n_7.c\", \"n_7.c\") == 0);\n}\n# 218 \"n_std.c\"\n\nvoid n_9( void)\n\n{\n\n\n\n#pragma who knows ?\n}\n\nvoid n_10( void)\n\n{\n\n\n\n\n\n\n\n    assert( 1);\n# 251 \"n_std.c\"\n}\n\nvoid n_11( void)\n\n{\n    int abc = 1, a = 0;\n\n\n\n\n\n\n\n    assert( abc);\n\n\n    assert( abc);\n# 279 \"n_std.c\"\n}\n\n\n\n\n\nvoid n_12( void)\n\n{\n\n\n    fputs( \"Bad evaluation of long.\\n\", stderr);\n\n\n    fputs( \"Bad evaluation of long.\\n\", stderr);\n# 325 \"n_std.c\"\n}\n\nvoid n_13( void)\n# 343 \"n_std.c\"\n{\n# 363 \"n_std.c\"\n}\n\nvoid n_13_5( void)\n\n{\n# 387 \"n_std.c\"\n}\n\nvoid n_13_7( void)\n\n{\n# 406 \"n_std.c\"\n}\n\nvoid n_13_8( void)\n\n{\n# 439 \"n_std.c\"\n}\n\nvoid n_13_13( void)\n\n{\n# 466 \"n_std.c\"\n}\n\nvoid n_15( void)\n\n{\n\n\n\n    assert( 1);\n# 483 \"n_std.c\"\n    assert( 1);\n\n}\n\nvoid n_18( void)\n\n\n\n\n{\n    int c = 3;\n\n\n    assert( (1-1) == 0);\n\n\n\n\n\n    assert( ( c ) == 3);\n\n\n\n    assert( strcmp( \"n1:n2\", \"n1:n2\") == 0);\n}\n\nvoid n_19( void)\n\n{\n    int c = 1;\n# 521 \"n_std.c\"\n    assert( ( c ) == 1);\n}\n\nvoid n_20( void)\n\n{\n\n\n    double fl;\n    assert( sizeof fl == sizeof (double));\n}\n\nvoid n_21( void)\n\n{\n    int a = 1, x = 2, y = -3;\n\n\n\n    assert( - - -a == -1);\n\n\n\n\n\n\n    assert( x- -y == -1);\n}\n\nvoid n_22( void)\n\n{\n\n\n\n    assert( strcmp( 
\"12E+EXP\", \"12E+EXP\") == 0);\n\n\n    assert( strcmp( \".2e-EXP\", \".2e-EXP\") == 0);\n\n\n\n    assert( strcmp( \"12+1\", \"12+1\") == 0);\n}\n\nvoid n_23( void)\n\n{\n    int xy = 1;\n\n\n    assert( xy == 1);\n\n\n\n\n    assert( .12e+2 == 12.0);\n}\n\nvoid n_24( void)\n\n{\n\n    assert( strcmp( \"a+b\", \"a+b\") == 0);\n\n\n\n    assert( strcmp( \"ab + cd\", \"ab + cd\") == 0);\n\n\n\n\n    assert( strcmp( \"'\\\"' + \\\"' \\\\\\\"\\\"\", \"'\\\"' + \\\"' \\\\\\\"\\\"\") == 0);\n\n\n\n    assert( strcmp( \"\\\"abc\\\"\", \"\\\"abc\\\"\") == 0);\n\n\n\n\n\n    assert( strcmp( \"x-y\", \"x-y\") == 0);\n}\n\nvoid n_25( void)\n\n\n\n{\n    int a = 1, b = 2, abc = 3, MACRO_0MACRO_1 = 2;\n\n\n\n\n\n\n    assert( (a,b - 1) == 1);\n\n\n    assert( ( - a) == -1);\n\n\n    assert( abc == 3);\n\n\n    assert( MACRO_0MACRO_1 == 2);\n\n\n    assert( strcmp( \"ZERO_TOKEN\", \"ZERO_TOKEN\") == 0);\n}\n# 648 \"n_std.c\"\nint f( int a)\n{\n    return a;\n}\n\nint g( int a)\n{\n    return a * 2;\n}\n\n\nvoid n_26( void)\n\n{\n    int x = 1;\n    int AB = 1;\n    int Z[1];\n    Z[0] = 1;\n\n\n\n\n    assert( Z[0] == 1);\n\n\n\n\n\n    assert( AB == 1);\n\n\n\n\n    assert( x + f(x) == 2);\n\n\n\n\n\n    assert( x + x + g( x) == 4);\n\n\n\n    assert( Z[0] + f(Z[0]) == 2);\n}\n\nvoid n_27( void)\n\n\n\n\n{\n    int a = 1, b = 2, c, m = 1, n = 2;\n# 711 \"n_std.c\"\n    assert( 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 == 36);\n\n\n\n\n\n\n    assert( (1) + (1 + 2) + 1 + 2 + 1 + 2 + 3 + 1 + 2 + 3 + 4 == 23);\n\n\n    assert( 1 == 1);\n\n\n\n\n\n\n    assert( ((a) - (b)) == -1);\n\n\n    c = (a - b);\n    assert( c == -1);\n\n\n\n\n\n    assert( n == 2);\n}\n\nvoid n_28( void)\n\n\n{\n    char * date = \"Jan 13 2020\";\n\n\n    assert( strcmp( \"n_std.c\", \"n_std.c\") == 0);\n\n\n    assert( 751 == 779);\n\n\n    assert( strlen( \"Jan 13 2020\") == 11);\n    assert( date[ 4] != '0');\n\n\n    assert( strlen( \"10:47:38\") == 8);\n\n\n    assert( 1);\n\n\n    assert( 199901L >= 
199409L);\n\n\n\n# 1 \"tests/test-c/line.h\" 1\n\n\n{\n    char * file = \"tests/test-c/line.h\";\n    file += strlen( file) - 6;\n    assert( 6 == 6 && strcmp( file, \"line.h\") == 0);\n}\n# 768 \"n_std.c\" 2\n}\n\nvoid n_29( void)\n\n{\n    int DEFINED = 1;\n\n\n\n\n    assert( DEFINED == 1);\n\n\n\n}\n\nvoid n_30( void)\n\n\n\n\n\n\n{\n\n\n\n    int a = 1, b = 2, c = 3;\n\n\n    assert\n    (\n        a + b + c\n\n\n\n\n\n        == 6\n    );\n}\n\nvoid n_32( void)\n\n{\n# 822 \"n_std.c\"\n}\n\nvoid n_37( void)\n\n{\n\n\n\n\n\n\n    int ABCDEFGHIJKLMNOPQRSTUVWXYZabcde = 31;\n    int ABCDEFGHIJKLMNOPQRSTUVWXYZabcd_ = 30;\n    int nest = 0;\n\n\n    assert(\n        ABCDEFGHIJKLMNOPQRSTUVWXYZabcde\n\n        == 31);\n\n\n\n    assert( ABCDEFGHIJKLMNOPQRSTUVWXYZabcd_ == 30);\n\n\n    nest = 0;\n# 865 \"n_std.c\"\n                                nest = 8;\n# 874 \"n_std.c\"\n    assert( nest == 8);\n\n\n    nest = 0;\n\n# 1 \"tests/test-c/nest1.h\" 1\n\n\n    nest = 1;\n\n\n# 1 \"tests/test-c/nest2.h\" 1\n\n\n    nest = 2;\n\n\n# 1 \"tests/test-c/nest3.h\" 1\n\n\n    nest = 3;\n\n\n# 1 \"tests/test-c/nest4.h\" 1\n\n\n    nest = 4;\n\n\n# 1 \"tests/test-c/nest5.h\" 1\n\n\n    nest = 5;\n\n\n# 1 \"tests/test-c/nest6.h\" 1\n\n\n    nest = 6;\n\n\n# 1 \"tests/test-c/nest7.h\" 1\n\n\n    nest = 7;\n\n\n# 1 \"tests/test-c/nest8.h\" 1\n\n\n\n    nest = 8;\n# 6 \"tests/test-c/nest7.h\" 2\n# 6 \"tests/test-c/nest6.h\" 2\n# 6 \"tests/test-c/nest5.h\" 2\n# 6 \"tests/test-c/nest4.h\" 2\n# 6 \"tests/test-c/nest3.h\" 2\n# 6 \"tests/test-c/nest2.h\" 2\n# 6 \"tests/test-c/nest1.h\" 2\n# 879 \"n_std.c\" 2\n assert( nest == 8);\n\n\n\n\n\n\n    nest = 32;\n\n    assert( nest == 32);\n\n\n    {\n        char * extremely_long_string 
=\n\"123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567\"\n\n\n\n\n\n\n\n        ;\n        assert( strlen( extremely_long_string) == 507);\n    }\n\n\n    {\n    int a123456789012345678901234567890 = 123450; int b123456789012345678901234567890 = 123451; int c123456789012345678901234567890 = 123452; int d123456789012345678901234567890 = 123453; int e123456789012345678901234567890 = 123454; int f123456789012345678901234567890 = 123455; int A123456789012345678901234567890 = 123456; int B123456789012345678901234567890 = 123457; int C123456789012345678901234567890 = 123458; int D1234567890123456789012 = 123459;\n# 917 \"n_std.c\"\n        assert( a123456789012345678901234567890 == 123450\n            && D1234567890123456789012 == 123459);\n    }\n\n\n\n\n\n\n\n# 1 \"tests/test-c/m1024.h\" 1\n# 927 \"n_std.c\" 2\n assert( 1);\n}\n"
  },
  {
    "path": "tests/n_std-gcc.i",
    "content": "# 1 \"tests/test-c/n_std.c\"\n# 1 \"<built-in>\"\n# 1 \"<command-line>\"\n# 1 \"/usr/include/stdc-predef.h\" 1 3 4\n# 1 \"<command-line>\" 2\n# 1 \"tests/test-c/n_std.c\"\n# 18 \"tests/test-c/n_std.c\"\n# 1 \"tests/test-c/defs.h\" 1\n# 19 \"tests/test-c/n_std.c\" 2\n# 30 \"tests/test-c/n_std.c\"\nvoid n_1( void);\nvoid n_2( void);\nvoid n_3( void);\nvoid n_4( void);\nvoid n_5( void);\nvoid n_6( void);\nvoid n_7( void);\nvoid n_9( void);\nvoid n_10( void);\nvoid n_11( void);\nvoid n_12( void);\nvoid n_13( void);\nvoid n_13_5( void);\nvoid n_13_7( void);\nvoid n_13_8( void);\nvoid n_13_13( void);\nvoid n_15( void);\nvoid n_18( void);\nvoid n_19( void);\nvoid n_20( void);\nvoid n_21( void);\nvoid n_22( void);\nvoid n_23( void);\nvoid n_24( void);\nvoid n_25( void);\nvoid n_26( void);\nvoid n_27( void);\nvoid n_28( void);\nvoid n_29( void);\nvoid n_30( void);\nvoid n_32( void);\nvoid n_37( void);\n\nint main( void)\n{\n\n    n_2();\n    n_3();\n\n    n_5();\n    n_6();\n    n_7();\n    n_9();\n    n_10();\n    n_11();\n    n_12();\n    n_13();\n    n_13_5();\n    n_13_7();\n    n_13_8();\n    n_13_13();\n    n_15();\n    n_18();\n    n_19();\n    n_20();\n    n_21();\n    n_22();\n    n_23();\n    n_24();\n    n_25();\n    n_26();\n    n_27();\n    n_28();\n    n_29();\n    n_30();\n    n_32();\n    n_37();\n    puts( \"<End of \\\"n_std.c\\\">\");\n    return 0;\n}\n\nchar quasi_trigraph[] = { '?', '?', ' ', '?', '?', '?', ' '\n            , '?', '?', '%', ' ', '?', '?', '^', ' ', '?', '#', '\\0' };\n\nvoid n_2( void)\n\n{\n    int ab = 1, cd = 2, ef = 3, abcde = 5;\n\n\n\n\n\n    assert( ab + cd + ef == 6);\n# 122 \"tests/test-c/n_std.c\"\n    assert (ab + cd + ef == 6);\n\n\n    assert (strcmp( \"abcde\",\n     \"abcde\") == 0);\n\n\n    assert( abcde\n   == 5);\n\n}\n\nvoid n_3( void)\n\n{\n    int abcd = 4;\n\n\n    assert( strcmp( \"abc de\", \"abc de\") == 0);\n# 156 \"tests/test-c/n_std.c\"\n    assert( abcd == 4);\n}\n\nvoid n_5( 
void)\n\n\n\n{\n    int abcde = 5;\n\n\n    assert( abcde == 5);\n}\n# 178 \"tests/test-c/n_std.c\"\nvoid n_6( void)\n\n{\n    int abc = 3;\n\n    assert( isalpha( 'a'));\n\n\n\n# 1 \"tests/test-c/header.h\" 1\n# 188 \"tests/test-c/n_std.c\" 2\n    assert( abc == 3);\n\n\n\n# 1 \"tests/test-c/header.h\" 1\n# 193 \"tests/test-c/n_std.c\" 2\n    assert( abc == 3);\n}\n\nvoid n_7( void)\n\n{\n# 1234 \"cpp\"\n    assert( 1234 == 1234);\n    assert( strcmp( \"cpp\", \"cpp\") == 0);\n# 2345 \"cpp\"\n    assert( 2345 == 2345);\n    assert( strcmp( \"cpp\", \"cpp\") == 0);\n# 1234 \"n_7.c\"\n    assert( 1234 == 1234);\n    assert( strcmp( \"n_7.c\", \"n_7.c\") == 0);\n}\n# 218 \"n_std.c\"\n\nvoid n_9( void)\n\n{\n\n\n       \n#pragma who knows ?\n}\n\nvoid n_10( void)\n\n{\n\n\n\n\n\n\n\n    assert( 1);\n# 251 \"n_std.c\"\n}\n\nvoid n_11( void)\n\n{\n    int abc = 1, a = 0;\n\n\n\n\n\n\n\n    assert( abc);\n\n\n    assert( abc);\n# 279 \"n_std.c\"\n}\n\n\n\n\n\nvoid n_12( void)\n\n{\n\n\n    fputs( \"Bad evaluation of long.\\n\", stderr);\n\n\n    fputs( \"Bad evaluation of long.\\n\", stderr);\n# 325 \"n_std.c\"\n}\n\nvoid n_13( void)\n# 343 \"n_std.c\"\n{\n# 363 \"n_std.c\"\n}\n\nvoid n_13_5( void)\n\n{\n# 387 \"n_std.c\"\n}\n\nvoid n_13_7( void)\n\n{\n# 406 \"n_std.c\"\n}\n\nvoid n_13_8( void)\n\n{\n# 439 \"n_std.c\"\n}\n\nvoid n_13_13( void)\n\n{\n# 466 \"n_std.c\"\n}\n\nvoid n_15( void)\n\n{\n\n\n\n    assert( 1);\n# 483 \"n_std.c\"\n    assert( 1);\n\n}\n\nvoid n_18( void)\n\n\n\n\n{\n    int c = 3;\n\n\n    assert( (1-1) == 0);\n\n\n\n\n\n    assert( ( c ) == 3);\n\n\n\n    assert( strcmp( \"n1:n2\", \"n1:n2\") == 0);\n}\n\nvoid n_19( void)\n\n{\n    int c = 1;\n# 521 \"n_std.c\"\n    assert( ( c ) == 1);\n}\n\nvoid n_20( void)\n\n{\n\n\n    double fl;\n    assert( sizeof fl == sizeof (double));\n}\n\nvoid n_21( void)\n\n{\n    int a = 1, x = 2, y = -3;\n\n\n\n    assert( - - -a == -1);\n\n\n\n\n\n\n    assert( x- -y == -1);\n}\n\nvoid n_22( void)\n\n{\n\n\n\n    
assert( strcmp( \"12E+EXP\", \"12E+EXP\") == 0);\n\n\n    assert( strcmp( \".2e-EXP\", \".2e-EXP\") == 0);\n\n\n\n    assert( strcmp( \"12+1\", \"12+1\") == 0);\n}\n\nvoid n_23( void)\n\n{\n    int xy = 1;\n\n\n    assert( xy == 1);\n\n\n\n\n    assert( .12e+2 == 12.0);\n}\n\nvoid n_24( void)\n\n{\n\n    assert( strcmp( \"a+b\", \"a+b\") == 0);\n\n\n\n    assert( strcmp( \"ab + cd\"\n             , \"ab + cd\") == 0);\n\n\n\n    assert( strcmp( \"'\\\"' + \\\"' \\\\\\\"\\\"\", \"'\\\"' + \\\"' \\\\\\\"\\\"\") == 0);\n\n\n\n    assert( strcmp( \"\\\"abc\\\"\"\n   , \"\\\"abc\\\"\") == 0);\n\n\n\n\n    assert( strcmp( \"x-y\", \"x-y\") == 0);\n}\n\nvoid n_25( void)\n\n\n\n{\n    int a = 1, b = 2, abc = 3, MACRO_0MACRO_1 = 2;\n\n\n\n\n\n\n    assert( (a,b - 1) == 1);\n\n\n    assert( ( - a) == -1);\n\n\n    assert( abc == 3);\n\n\n    assert( MACRO_0MACRO_1 == 2);\n\n\n    assert( strcmp( \"ZERO_TOKEN\", \"ZERO_TOKEN\") == 0);\n}\n# 648 \"n_std.c\"\nint f( int a)\n{\n    return a;\n}\n\nint g( int a)\n{\n    return a * 2;\n}\n\n\nvoid n_26( void)\n\n{\n    int x = 1;\n    int AB = 1;\n    int Z[1];\n    Z[0] = 1;\n\n\n\n\n    assert( Z[0] == 1);\n\n\n\n\n\n    assert( AB == 1);\n\n\n\n\n    assert( x + f(x) == 2);\n\n\n\n\n\n    assert( x + x + g( x) == 4);\n\n\n\n    assert( Z[0] + f(Z[0]) == 2);\n}\n\nvoid n_27( void)\n\n\n\n\n{\n    int a = 1, b = 2, c, m = 1, n = 2;\n# 711 \"n_std.c\"\n    assert( 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 == 36);\n\n\n\n\n\n\n    assert( (1) + (1 + 2) + 1 + 2 + 1 + 2 + 3 + 1 + 2 + 3 + 4 == 23);\n\n\n    assert( 1 == 1);\n\n\n\n\n\n\n    assert( ((a) - (b)) == -1);\n\n\n    c = (a - b);\n    assert( c == -1);\n\n\n\n\n\n    assert( n == 2);\n}\n\nvoid n_28( void)\n\n\n{\n    char * date = \"Jan 13 2020\";\n\n\n    assert( strcmp( \"n_std.c\", \"n_std.c\") == 0);\n\n\n    assert( 751 == 779);\n\n\n    assert( strlen( \"Jan 13 2020\") == 11);\n    assert( date[ 4] != '0');\n\n\n    assert( strlen( \"10:47:38\") == 8);\n\n\n    assert( 1);\n\n\n  
  assert( __STDC_VERSION__ >= 199409L);\n\n\n# 1 \"tests/test-c/line.h\" 1\n\n\n{\n    char * file = \"tests/test-c/line.h\";\n    file += strlen( file) - 6;\n    assert( 6 == 6 && strcmp( file, \"line.h\") == 0);\n}\n# 768 \"n_std.c\" 2\n}\n\nvoid n_29( void)\n\n{\n    int DEFINED = 1;\n\n\n\n\n    assert( DEFINED == 1);\n\n\n\n}\n\nvoid n_30( void)\n\n\n\n\n\n\n{\n\n\n\n    int a = 1, b = 2, c = 3;\n\n\n    assert\n    (\n        a + b + c\n\n\n\n\n\n        == 6\n    );\n}\n\nvoid n_32( void)\n\n{\n# 822 \"n_std.c\"\n}\n\nvoid n_37( void)\n\n{\n\n\n\n\n\n\n    int ABCDEFGHIJKLMNOPQRSTUVWXYZabcde = 31;\n    int ABCDEFGHIJKLMNOPQRSTUVWXYZabcd_ = 30;\n    int nest = 0;\n\n\n    assert(\n        ABCDEFGHIJKLMNOPQRSTUVWXYZabcde\n\n        == 31);\n\n\n\n    assert( ABCDEFGHIJKLMNOPQRSTUVWXYZabcd_ == 30);\n\n\n    nest = 0;\n# 865 \"n_std.c\"\n                                nest = 8;\n# 874 \"n_std.c\"\n    assert( nest == 8);\n\n\n    nest = 0;\n# 1 \"tests/test-c/nest1.h\" 1\n\n\n    nest = 1;\n\n# 1 \"tests/test-c/nest2.h\" 1\n\n\n    nest = 2;\n\n# 1 \"tests/test-c/nest3.h\" 1\n\n\n    nest = 3;\n\n# 1 \"tests/test-c/nest4.h\" 1\n\n\n    nest = 4;\n\n# 1 \"tests/test-c/nest5.h\" 1\n\n\n    nest = 5;\n\n# 1 \"tests/test-c/nest6.h\" 1\n\n\n    nest = 6;\n\n# 1 \"tests/test-c/nest7.h\" 1\n\n\n    nest = 7;\n\n# 1 \"tests/test-c/nest8.h\" 1\n\n\n\n    nest = 8;\n# 5 \"tests/test-c/nest7.h\" 2\n# 5 \"tests/test-c/nest6.h\" 2\n# 5 \"tests/test-c/nest5.h\" 2\n# 5 \"tests/test-c/nest4.h\" 2\n# 5 \"tests/test-c/nest3.h\" 2\n# 5 \"tests/test-c/nest2.h\" 2\n# 5 \"tests/test-c/nest1.h\" 2\n# 879 \"n_std.c\" 2\n    assert( nest == 8);\n\n\n\n\n\n\n    nest = 32;\n\n    assert( nest == 32);\n\n\n    {\n        char * extremely_long_string 
=\n\"123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567\"\n\n\n\n\n\n\n\n        ;\n        assert( strlen( extremely_long_string) == 507);\n    }\n\n\n    {\n    int a123456789012345678901234567890 = 123450;\n    int b123456789012345678901234567890 = 123451;\n    int c123456789012345678901234567890 = 123452;\n    int d123456789012345678901234567890 = 123453;\n    int e123456789012345678901234567890 = 123454;\n    int f123456789012345678901234567890 = 123455;\n    int A123456789012345678901234567890 = 123456;\n    int B123456789012345678901234567890 = 123457;\n    int C123456789012345678901234567890 = 123458;\n    int D1234567890123456789012 = 123459;\n        assert( a123456789012345678901234567890 == 123450\n            && D1234567890123456789012 == 123459);\n    }\n\n\n\n\n\n\n# 1 \"tests/test-c/m1024.h\" 1\n# 927 \"n_std.c\" 2\n    assert( 1);\n}\n"
  },
  {
    "path": "tests/n_std-pcpp.i",
    "content": "# 30 \"tests/test-c/n_std.c\"\nvoid n_1( void);\nvoid n_2( void);\nvoid n_3( void);\nvoid n_4( void);\nvoid n_5( void);\nvoid n_6( void);\nvoid n_7( void);\nvoid n_9( void);\nvoid n_10( void);\nvoid n_11( void);\nvoid n_12( void);\nvoid n_13( void);\nvoid n_13_5( void);\nvoid n_13_7( void);\nvoid n_13_8( void);\nvoid n_13_13( void);\nvoid n_15( void);\nvoid n_18( void);\nvoid n_19( void);\nvoid n_20( void);\nvoid n_21( void);\nvoid n_22( void);\nvoid n_23( void);\nvoid n_24( void);\nvoid n_25( void);\nvoid n_26( void);\nvoid n_27( void);\nvoid n_28( void);\nvoid n_29( void);\nvoid n_30( void);\nvoid n_32( void);\nvoid n_37( void);\n\nint main( void)\n{\n\n    n_2();\n    n_3();\n\n    n_5();\n    n_6();\n    n_7();\n    n_9();\n    n_10();\n    n_11();\n    n_12();\n    n_13();\n    n_13_5();\n    n_13_7();\n    n_13_8();\n    n_13_13();\n    n_15();\n    n_18();\n    n_19();\n    n_20();\n    n_21();\n    n_22();\n    n_23();\n    n_24();\n    n_25();\n    n_26();\n    n_27();\n    n_28();\n    n_29();\n    n_30();\n    n_32();\n    n_37();\n    puts( \"<End of \\\"n_std.c\\\">\");\n    return 0;\n}\n\nchar quasi_trigraph[] = { '?', '?', ' ', '?', '?', '?', ' '\n            , '?', '?', '%', ' ', '?', '?', '^', ' ', '?', '#', '\\0' };\n\nvoid n_2( void)\n\n{\n    int ab = 1, cd = 2, ef = 3, abcde = 5;\n\n\n\n\n\n    assert( ab + cd + ef == 6);\n# 122 \"tests/test-c/n_std.c\"\n    assert (ab + cd + ef == 6);\n\n\n    assert (strcmp( \"abcde\", \"abcde\") == 0);\n\n\n\n    assert( abcde == 5);\n\n\n}\n\nvoid n_3( void)\n\n{\n    int abcd = 4;\n\n\n    assert( strcmp( \"abc de\", \"abc de\") == 0);\n# 156 \"tests/test-c/n_std.c\"\n    assert( abcd == 4);\n}\n\nvoid n_5( void)\n\n\n\n{\n    int abcde = 5;\n\n\n    assert( abcde == 5);\n}\n# 178 \"tests/test-c/n_std.c\"\nvoid n_6( void)\n\n{\n    int abc = 3;\n\n    assert( isalpha( 'a'));\n\n\n\n\n    assert( abc == 3);\n\n\n\n\n    assert( abc == 3);\n}\n\nvoid n_7( void)\n\n{\n\n#line 1234 \"cpp\"\n   
 assert( 201 == 1234);\n    assert( strcmp( \"tests/test-c/n_std.c\", \"cpp\") == 0);\n\n\n#line 2345\n    assert( 206 == 2345);\n    assert( strcmp( \"tests/test-c/n_std.c\", \"cpp\") == 0);\n\n\n\n#line LINE_AND_FILENAME\n    assert( 212 == 1234);\n    assert( strcmp( \"tests/test-c/n_std.c\", \"n_7.c\") == 0);\n}\n\n\n#line 218 \"n_std.c\"\n\nvoid n_9( void)\n\n{\n\n\n\n#pragma who knows ?\n}\n\nvoid n_10( void)\n\n{\n# 238 \"tests/test-c/n_std.c\"\n    assert( 1);\n# 251 \"tests/test-c/n_std.c\"\n}\n\nvoid n_11( void)\n\n{\n    int abc = 1, a = 0;\n# 264 \"tests/test-c/n_std.c\"\n    assert( abc);\n\n\n    assert( abc);\n# 279 \"tests/test-c/n_std.c\"\n}\n\n\n\n\n\nvoid n_12( void)\n\n{\n\n\n    fputs( \"Bad evaluation of long.\\n\", stderr);\n\n\n    fputs( \"Bad evaluation of long.\\n\", stderr);\n# 325 \"tests/test-c/n_std.c\"\n}\n\nvoid n_13( void)\n# 343 \"tests/test-c/n_std.c\"\n{\n# 363 \"tests/test-c/n_std.c\"\n}\n\nvoid n_13_5( void)\n\n{\n# 387 \"tests/test-c/n_std.c\"\n}\n\nvoid n_13_7( void)\n\n{\n# 406 \"tests/test-c/n_std.c\"\n}\n\nvoid n_13_8( void)\n\n{\n# 439 \"tests/test-c/n_std.c\"\n}\n\nvoid n_13_13( void)\n\n{\n# 466 \"tests/test-c/n_std.c\"\n}\n\nvoid n_15( void)\n\n{\n\n\n\n    assert( 1);\n# 483 \"tests/test-c/n_std.c\"\n    assert( 1);\n\n}\n\nvoid n_18( void)\n\n\n\n\n{\n    int c = 3;\n\n\n    assert( (1-1) == 0);\n\n\n\n\n\n    assert( ( c ) == 3);\n\n\n\n    assert( strcmp( \"n1:n2\", \"n1:n2\") == 0);\n}\n\nvoid n_19( void)\n\n{\n    int c = 1;\n# 521 \"tests/test-c/n_std.c\"\n    assert( ( c ) == 1);\n}\n\nvoid n_20( void)\n\n{\n\n\n    double fl;\n    assert( sizeof fl == sizeof (double));\n}\n\nvoid n_21( void)\n\n{\n    int a = 1, x = 2, y = -3;\n\n\n\n    assert( ---a == -1);\n\n\n\n\n\n\n    assert( x--y == -1);\n}\n\nvoid n_22( void)\n\n{\n\n\n\n    assert( strcmp( \"12E+1\", \"12E+EXP\") == 0);\n\n\n    assert( strcmp( \".2e-1\", \".2e-EXP\") == 0);\n\n\n\n    assert( strcmp( \"12+1\", \"12+1\") == 0);\n}\n\nvoid n_23( 
void)\n\n{\n    int xy = 1;\n\n\n    assert( xy == 1);\n\n\n\n\n    assert( .12e+2 == 12.0);\n}\n\nvoid n_24( void)\n\n{\n\n    assert( strcmp( \"a+b\", \"a+b\") == 0);\n\n\n\n    assert( strcmp( \"ab + cd\", \"ab + cd\") == 0);\n\n\n\n\n    assert( strcmp( \"'\\\"' + \\\"' \\\\\\\"\\\"\", \"'\\\"' + \\\"' \\\\\\\"\\\"\") == 0);\n\n\n\n    assert( strcmp( \"\\\"abc\\\"\", \"\\\"abc\\\"\") == 0);\n\n\n\n\n\n    assert( strcmp( \"x-y\", \"x-y\") == 0);\n}\n\nvoid n_25( void)\n\n\n\n{\n    int a = 1, b = 2, abc = 3, MACRO_0MACRO_1 = 2;\n\n\n\n\n\n\n    assert( (a,b - 1) == 1);\n\n\n    assert( ( - a) == -1);\n\n\n    assert( abc == 3);\n\n\n    assert( MACRO_0MACRO_1 == 2);\n\n\n    assert( strcmp( \"ZERO_TOKEN\", \"ZERO_TOKEN\") == 0);\n}\n# 648 \"tests/test-c/n_std.c\"\nint f( int a)\n{\n    return a;\n}\n\nint g( int a)\n{\n    return a * 2;\n}\n\n\nvoid n_26( void)\n\n{\n    int x = 1;\n    int AB = 1;\n    int Z[1];\n    Z[0] = 1;\n\n\n\n\n    assert( Z[0] == 1);\n\n\n\n\n\n    assert( AB == 1);\n\n\n\n\n    assert( x + f(x) == 2);\n\n\n\n\n\n    assert( x + x + g( x) == 4);\n\n\n\n    assert( Z[0] + f(Z[0]) == 2);\n}\n\nvoid n_27( void)\n\n\n\n\n{\n    int a = 1, b = 2, c, m = 1, n = 2;\n# 711 \"tests/test-c/n_std.c\"\n    assert( 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 == 36);\n\n\n\n\n\n\n    assert( (1) + (1 + 2) + 1 + 2 + 1 + 2 + 3 + 1 + 2 + 3 + 4 == 23);\n\n\n    assert( 1 == 1);\n\n\n\n\n\n\n    assert( ((a) - (b)) == -1);\n\n\n    c = (a - b);\n    assert( c == -1);\n\n\n\n\n\n    assert( n == 2);\n}\n\nvoid n_28( void)\n\n\n{\n    char * date = \"Jan 13 2020\";\n\n\n    assert( strcmp( \"tests/test-c/n_std.c\", \"n_std.c\") == 0);\n\n\n    assert( 731 == 751);\n\n\n    assert( strlen( \"Jan 13 2020\") == 11);\n    assert( date[ 4] != '0');\n\n\n    assert( strlen( \"10:47:38\") == 8);\n\n\n    assert( 1);\n\n\n    assert( 199901L >= 199409L);\n# 3 \"tests/test-c/line.h\"\n{\n    char * file = \"tests/test-c/line.h\";\n    file += strlen( file) - 6;\n    assert( 
731 == 6 && strcmp( file, \"line.h\") == 0);\n}\n# 768 \"tests/test-c/n_std.c\"\n}\n\nvoid n_29( void)\n\n{\n    int DEFINED = 1;\n\n\n\n\n    assert( DEFINED == 1);\n\n\n\n}\n\nvoid n_30( void)\n\n\n\n\n\n\n{\n\n\n\n    int a = 1, b = 2, c = 3;\n\n\n    assert\n    (\n        a + b + c\n\n\n\n\n\n        == 6\n    );\n}\n\nvoid n_32( void)\n\n{\n# 822 \"tests/test-c/n_std.c\"\n}\n\nvoid n_37( void)\n\n{\n\n\n\n\n\n\n    int ABCDEFGHIJKLMNOPQRSTUVWXYZabcde = 31;\n    int ABCDEFGHIJKLMNOPQRSTUVWXYZabcd_ = 30;\n    int nest = 0;\n\n\n    assert(\n        ABCDEFGHIJKLMNOPQRSTUVWXYZabcde\n\n        == 31);\n\n\n\n    assert( ABCDEFGHIJKLMNOPQRSTUVWXYZabcd_ == 30);\n\n\n    nest = 0;\n# 865 \"tests/test-c/n_std.c\"\n                                nest = 8;\n# 874 \"tests/test-c/n_std.c\"\n    assert( nest == 8);\n\n\n    nest = 0;\n# 3 \"tests/test-c/nest1.h\"\n    nest = 1;\n# 3 \"tests/test-c/nest2.h\"\n    nest = 2;\n# 3 \"tests/test-c/nest3.h\"\n    nest = 3;\n# 3 \"tests/test-c/nest4.h\"\n    nest = 4;\n# 3 \"tests/test-c/nest5.h\"\n    nest = 5;\n# 3 \"tests/test-c/nest6.h\"\n    nest = 6;\n# 3 \"tests/test-c/nest7.h\"\n    nest = 7;\n# 4 \"tests/test-c/nest8.h\"\n    nest = 8;\n# 879 \"tests/test-c/n_std.c\"\n    assert( nest == 8);\n\n\n\n\n\n\n    nest = 32;\n\n    assert( nest == 32);\n\n\n    {\n        char * extremely_long_string =\n\"123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567\"\n# 901 \"tests/test-c/n_std.c\"\n        ;\n        assert( strlen( extremely_long_string) == 507);\n    }\n\n\n    {\n    
int a123456789012345678901234567890 = 123450; int b123456789012345678901234567890 = 123451; int c123456789012345678901234567890 = 123452; int d123456789012345678901234567890 = 123453; int e123456789012345678901234567890 = 123454; int f123456789012345678901234567890 = 123455; int A123456789012345678901234567890 = 123456; int B123456789012345678901234567890 = 123457; int C123456789012345678901234567890 = 123458; int D1234567890123456789012 = 123459;\n# 917 \"tests/test-c/n_std.c\"\n        assert( a123456789012345678901234567890 == 123450\n            && D1234567890123456789012 == 123459);\n    }\n# 927 \"tests/test-c/n_std.c\"\n    assert( 1);\n}\n"
  },
  {
    "path": "tests/n_std.i",
    "content": "# 30 \"tests/test-c/n_std.c\"\nvoid n_1( void);\nvoid n_2( void);\nvoid n_3( void);\nvoid n_4( void);\nvoid n_5( void);\nvoid n_6( void);\nvoid n_7( void);\nvoid n_9( void);\nvoid n_10( void);\nvoid n_11( void);\nvoid n_12( void);\nvoid n_13( void);\nvoid n_13_5( void);\nvoid n_13_7( void);\nvoid n_13_8( void);\nvoid n_13_13( void);\nvoid n_15( void);\nvoid n_18( void);\nvoid n_19( void);\nvoid n_20( void);\nvoid n_21( void);\nvoid n_22( void);\nvoid n_23( void);\nvoid n_24( void);\nvoid n_25( void);\nvoid n_26( void);\nvoid n_27( void);\nvoid n_28( void);\nvoid n_29( void);\nvoid n_30( void);\nvoid n_32( void);\nvoid n_37( void);\n\nint main( void)\n{\n\n    n_2();\n    n_3();\n\n    n_5();\n    n_6();\n    n_7();\n    n_9();\n    n_10();\n    n_11();\n    n_12();\n    n_13();\n    n_13_5();\n    n_13_7();\n    n_13_8();\n    n_13_13();\n    n_15();\n    n_18();\n    n_19();\n    n_20();\n    n_21();\n    n_22();\n    n_23();\n    n_24();\n    n_25();\n    n_26();\n    n_27();\n    n_28();\n    n_29();\n    n_30();\n    n_32();\n    n_37();\n    puts( \"<End of \\\"n_std.c\\\">\");\n    return 0;\n}\n\nchar quasi_trigraph[] = { '?', '?', ' ', '?', '?', '?', ' '\n            , '?', '?', '%', ' ', '?', '?', '^', ' ', '?', '#', '\\0' };\n\nvoid n_2( void)\n\n{\n    int ab = 1, cd = 2, ef = 3, abcde = 5;\n\n\n\n\n\n    assert( ab + cd + ef == 6);\n# 122 \"tests/test-c/n_std.c\"\n    assert (ab + cd + ef == 6);\n\n\n    assert (strcmp( \"abcde\", \"abcde\") == 0);\n\n\n\n    assert( abcde == 5);\n\n\n}\n\nvoid n_3( void)\n\n{\n    int abcd = 4;\n\n\n    assert( strcmp( \"abc de\", \"abc de\") == 0);\n# 156 \"tests/test-c/n_std.c\"\n    assert( abcd == 4);\n}\n\nvoid n_5( void)\n\n\n\n{\n    int abcde = 5;\n\n\n    assert( abcde == 5);\n}\n# 178 \"tests/test-c/n_std.c\"\nvoid n_6( void)\n\n{\n    int abc = 3;\n\n    assert( isalpha( 'a'));\n\n\n\n\n    assert( abc == 3);\n\n\n\n\n    assert( abc == 3);\n}\n\nvoid n_7( void)\n\n{\n\n#line 1234 \"cpp\"\n   
 assert( 201 == 1234);\n    assert( strcmp( \"tests/test-c/n_std.c\", \"cpp\") == 0);\n\n\n#line 2345\n    assert( 206 == 2345);\n    assert( strcmp( \"tests/test-c/n_std.c\", \"cpp\") == 0);\n\n\n\n#line LINE_AND_FILENAME\n    assert( 212 == 1234);\n    assert( strcmp( \"tests/test-c/n_std.c\", \"n_7.c\") == 0);\n}\n\n\n#line 218 \"n_std.c\"\n\nvoid n_9( void)\n\n{\n\n\n\n#pragma who knows ?\n}\n\nvoid n_10( void)\n\n{\n# 238 \"tests/test-c/n_std.c\"\n    assert( 1);\n# 251 \"tests/test-c/n_std.c\"\n}\n\nvoid n_11( void)\n\n{\n    int abc = 1, a = 0;\n# 264 \"tests/test-c/n_std.c\"\n    assert( abc);\n\n\n    assert( abc);\n# 279 \"tests/test-c/n_std.c\"\n}\n\n\n\n\n\nvoid n_12( void)\n\n{\n\n\n    fputs( \"Bad evaluation of long.\\n\", stderr);\n\n\n    fputs( \"Bad evaluation of long.\\n\", stderr);\n# 325 \"tests/test-c/n_std.c\"\n}\n\nvoid n_13( void)\n# 343 \"tests/test-c/n_std.c\"\n{\n# 363 \"tests/test-c/n_std.c\"\n}\n\nvoid n_13_5( void)\n\n{\n# 387 \"tests/test-c/n_std.c\"\n}\n\nvoid n_13_7( void)\n\n{\n# 406 \"tests/test-c/n_std.c\"\n}\n\nvoid n_13_8( void)\n\n{\n# 439 \"tests/test-c/n_std.c\"\n}\n\nvoid n_13_13( void)\n\n{\n# 466 \"tests/test-c/n_std.c\"\n}\n\nvoid n_15( void)\n\n{\n\n\n\n    assert( 1);\n# 483 \"tests/test-c/n_std.c\"\n    assert( 1);\n\n}\n\nvoid n_18( void)\n\n\n\n\n{\n    int c = 3;\n\n\n    assert( (1-1) == 0);\n\n\n\n\n\n    assert( ( c ) == 3);\n\n\n\n    assert( strcmp( \"n1:n2\", \"n1:n2\") == 0);\n}\n\nvoid n_19( void)\n\n{\n    int c = 1;\n# 521 \"tests/test-c/n_std.c\"\n    assert( ( c ) == 1);\n}\n\nvoid n_20( void)\n\n{\n\n\n    double fl;\n    assert( sizeof fl == sizeof (double));\n}\n\nvoid n_21( void)\n\n{\n    int a = 1, x = 2, y = -3;\n\n\n\n    assert( ---a == -1);\n\n\n\n\n\n\n    assert( x--y == -1);\n}\n\nvoid n_22( void)\n\n{\n\n\n\n    assert( strcmp( \"12E+1\", \"12E+EXP\") == 0);\n\n\n    assert( strcmp( \".2e-1\", \".2e-EXP\") == 0);\n\n\n\n    assert( strcmp( \"12+1\", \"12+1\") == 0);\n}\n\nvoid n_23( 
void)\n\n{\n    int xy = 1;\n\n\n    assert( xy == 1);\n\n\n\n\n    assert( .12e+2 == 12.0);\n}\n\nvoid n_24( void)\n\n{\n\n    assert( strcmp( \"a+b\", \"a+b\") == 0);\n\n\n\n    assert( strcmp( \"ab + cd\", \"ab + cd\") == 0);\n\n\n\n\n    assert( strcmp( \"'\\\"' + \\\"' \\\\\\\"\\\"\", \"'\\\"' + \\\"' \\\\\\\"\\\"\") == 0);\n\n\n\n    assert( strcmp( \"\\\"abc\\\"\", \"\\\"abc\\\"\") == 0);\n\n\n\n\n\n    assert( strcmp( \"x-y\", \"x-y\") == 0);\n}\n\nvoid n_25( void)\n\n\n\n{\n    int a = 1, b = 2, abc = 3, MACRO_0MACRO_1 = 2;\n\n\n\n\n\n\n    assert( (a,b - 1) == 1);\n\n\n    assert( ( - a) == -1);\n\n\n    assert( abc == 3);\n\n\n    assert( MACRO_0MACRO_1 == 2);\n\n\n    assert( strcmp( \"ZERO_TOKEN\", \"ZERO_TOKEN\") == 0);\n}\n# 648 \"tests/test-c/n_std.c\"\nint f( int a)\n{\n    return a;\n}\n\nint g( int a)\n{\n    return a * 2;\n}\n\n\nvoid n_26( void)\n\n{\n    int x = 1;\n    int AB = 1;\n    int Z[1];\n    Z[0] = 1;\n\n\n\n\n    assert( Z[0] == 1);\n\n\n\n\n\n    assert( AB == 1);\n\n\n\n\n    assert( x + f(x) == 2);\n\n\n\n\n\n    assert( x + x + g( x) == 4);\n\n\n\n    assert( Z[0] + f(Z[0]) == 2);\n}\n\nvoid n_27( void)\n\n\n\n\n{\n    int a = 1, b = 2, c, m = 1, n = 2;\n# 711 \"tests/test-c/n_std.c\"\n    assert( 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 == 36);\n\n\n\n\n\n\n    assert( (1) + (1 + 2) + 1 + 2 + 1 + 2 + 3 + 1 + 2 + 3 + 4 == 23);\n\n\n    assert( 1 == 1);\n\n\n\n\n\n\n    assert( ((a) - (b)) == -1);\n\n\n    c = (a - b);\n    assert( c == -1);\n\n\n\n\n\n    assert( n == 2);\n}\n\nvoid n_28( void)\n\n\n{\n    char * date = \"Jan 13 2020\";\n\n\n    assert( strcmp( \"tests/test-c/n_std.c\", \"n_std.c\") == 0);\n\n\n    assert( 731 == 751);\n\n\n    assert( strlen( \"Jan 13 2020\") == 11);\n    assert( date[ 4] != '0');\n\n\n    assert( strlen( \"10:47:38\") == 8);\n\n\n    assert( 1);\n\n\n    assert( 199901L >= 199409L);\n# 3 \"tests/test-c/line.h\"\n{\n    char * file = \"tests/test-c/line.h\";\n    file += strlen( file) - 6;\n    assert( 
731 == 6 && strcmp( file, \"line.h\") == 0);\n}\n# 768 \"tests/test-c/n_std.c\"\n}\n\nvoid n_29( void)\n\n{\n    int DEFINED = 1;\n\n\n\n\n    assert( DEFINED == 1);\n\n\n\n}\n\nvoid n_30( void)\n\n\n\n\n\n\n{\n\n\n\n    int a = 1, b = 2, c = 3;\n\n\n    assert\n    (\n        a + b + c\n\n\n\n\n\n        == 6\n    );\n}\n\nvoid n_32( void)\n\n{\n# 822 \"tests/test-c/n_std.c\"\n}\n\nvoid n_37( void)\n\n{\n\n\n\n\n\n\n    int ABCDEFGHIJKLMNOPQRSTUVWXYZabcde = 31;\n    int ABCDEFGHIJKLMNOPQRSTUVWXYZabcd_ = 30;\n    int nest = 0;\n\n\n    assert(\n        ABCDEFGHIJKLMNOPQRSTUVWXYZabcde\n\n        == 31);\n\n\n\n    assert( ABCDEFGHIJKLMNOPQRSTUVWXYZabcd_ == 30);\n\n\n    nest = 0;\n# 865 \"tests/test-c/n_std.c\"\n                                nest = 8;\n# 874 \"tests/test-c/n_std.c\"\n    assert( nest == 8);\n\n\n    nest = 0;\n# 3 \"tests/test-c/nest1.h\"\n    nest = 1;\n# 3 \"tests/test-c/nest2.h\"\n    nest = 2;\n# 3 \"tests/test-c/nest3.h\"\n    nest = 3;\n# 3 \"tests/test-c/nest4.h\"\n    nest = 4;\n# 3 \"tests/test-c/nest5.h\"\n    nest = 5;\n# 3 \"tests/test-c/nest6.h\"\n    nest = 6;\n# 3 \"tests/test-c/nest7.h\"\n    nest = 7;\n# 4 \"tests/test-c/nest8.h\"\n    nest = 8;\n# 879 \"tests/test-c/n_std.c\"\n    assert( nest == 8);\n\n\n\n\n\n\n    nest = 32;\n\n    assert( nest == 32);\n\n\n    {\n        char * extremely_long_string =\n\"123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567\"\n# 901 \"tests/test-c/n_std.c\"\n        ;\n        assert( strlen( extremely_long_string) == 507);\n    }\n\n\n    {\n    
int a123456789012345678901234567890 = 123450; int b123456789012345678901234567890 = 123451; int c123456789012345678901234567890 = 123452; int d123456789012345678901234567890 = 123453; int e123456789012345678901234567890 = 123454; int f123456789012345678901234567890 = 123455; int A123456789012345678901234567890 = 123456; int B123456789012345678901234567890 = 123457; int C123456789012345678901234567890 = 123458; int D1234567890123456789012 = 123459;\n# 917 \"tests/test-c/n_std.c\"\n        assert( a123456789012345678901234567890 == 123450\n            && D1234567890123456789012 == 123459);\n    }\n# 927 \"tests/test-c/n_std.c\"\n    assert( 1);\n}\n"
  },
  {
    "path": "tests/n_std.py",
    "content": "\nimport unittest, time, difflib\nclock = time.process_time\n\nclass n_std(unittest.TestCase):\n    def runTest(self):\n        from pcpp import Preprocessor\n        import os\n\n        start = clock()\n        p = Preprocessor()\n        p.compress = 1\n        p.line_directive = '#'\n        p.define('__STDC__ 1')\n        p.define('__STDC_VERSION__ 199901L')\n        p.define('__DATE__ \"Jan 13 2020\"')\n        p.define('__TIME__ \"10:47:38\"')\n        p.define('NO_SYSTEM_HEADERS')\n        path = 'tests/test-c/n_std.c'\n        with open(path, 'rt') as ih:\n            p.parse(ih.read(), path)\n        with open('tests/n_std.i', 'w') as oh:\n            p.write(oh)\n        end = clock()\n        print(\"Preprocessed\", path, \"in\", end-start, \"seconds\")\n        self.assertEqual(p.return_code, 0)\n\n        with open('tests/n_std.i', 'rt') as ih:\n            written = ih.readlines()\n        with open('tests/n_std-pcpp.i', 'rt') as ih:\n            reference = ih.readlines()\n        if written != reference:\n            print(\"pcpp is not emitting its reference output! Differences:\")\n            for line in difflib.unified_diff(reference, written, fromfile='n_std-pcpp.i', tofile='n_std.i'):\n                print(line, end='')\n            self.assertTrue(False)\n        \n"
  },
  {
    "path": "tests/passthru.py",
    "content": "\nimport unittest, time\nfrom io import StringIO\nclock = time.process_time\n\nclass runner(object):\n    def runTest(self):\n        from pcpp import Preprocessor, OutputDirective, Action\n        import os, sys\n\n        class PassThruPreprocessor(Preprocessor):\n            def on_include_not_found(self,is_malformed,is_system_include,curdir,includepath):\n                raise OutputDirective(Action.IgnoreAndPassThrough)\n\n            def on_unknown_macro_in_defined_expr(self,tok):\n                return None  # Pass through as expanded as possible\n                \n            def on_unknown_macro_in_expr(self,ident):\n                return None  # Pass through as expanded as possible\n                \n            def on_unknown_macro_function_in_expr(self,ident):\n                return None  # Pass through as expanded as possible\n                \n            def on_directive_handle(self,directive,toks,ifpassthru,precedingtoks):\n                super(PassThruPreprocessor, self).on_directive_handle(directive,toks,ifpassthru,precedingtoks)\n                return None  # Pass through where possible\n\n            def on_directive_unknown(self,directive,toks,ifpassthru,precedingtoks):\n                if directive.value == 'error' or directive.value == 'warning':\n                    super(PassThruPreprocessor, self).on_directive_unknown(directive,toks,ifpassthru,precedingtoks)\n                # Pass through\n                raise OutputDirective(Action.IgnoreAndPassThrough)                \n\n            def on_comment(self,tok):\n                # Pass through\n                return True\n\n        start = clock()\n        p = PassThruPreprocessor()\n        p.passthru_expr_has_include = True\n        p.parse(self.input)\n        oh = StringIO()\n        p.write(oh)\n        end = clock()\n        print(\"Preprocessed test in\", end-start, \"seconds\")\n        if oh.getvalue() != self.output:\n            print(\"Should be:\\n\" + 
self.output + \"EOF\\n\", file = sys.stderr)\n            print(\"\\nWas:\\n\" + oh.getvalue()+\"EOF\\n\", file = sys.stderr)\n        self.assertEqual(p.return_code, 0)\n        self.assertEqual(oh.getvalue(), self.output)\n\n            \nclass test1(unittest.TestCase, runner):\n    input = r\"\"\"#if 5\nI am five\n#else\nI am not five\n#endif\"\"\"\n    output = r\"\"\"\nI am five\n\"\"\"\n\nclass test2(unittest.TestCase, runner):\n    input = r\"\"\"#if UNKNOWN\nI am five\n#else\nI am not five\n#endif\"\"\"\n    output = r\"\"\"#if UNKNOWN\nI am five\n#else\nI am not five\n#endif\n\"\"\"\n\nclass test3(unittest.TestCase, runner):\n    input = r\"\"\"#if UNKNOWN\nA\n#elif ALSO_UNKNOWN\nB\n#else\nC\n#endif\"\"\"\n    output = r\"\"\"#if UNKNOWN\nA\n#elif ALSO_UNKNOWN\nB\n#else\nC\n#endif\n\"\"\"\n\nclass test4(unittest.TestCase, runner):\n    input = r\"\"\"#define ALSO_UNKNOWN 1\n#if UNKNOWN\nA\n#elif ALSO_UNKNOWN\nB\n#else\nC\n#endif\"\"\"\n    output = r\"\"\"#define ALSO_UNKNOWN 1\n#if UNKNOWN\nA\n#elif 1\nB\n#else\nC\n#endif\n\"\"\"\n\nclass test5(unittest.TestCase, runner):\n    input = r\"\"\"#define ALSO_UNKNOWN 0\n#if UNKNOWN\nA\n#elif ALSO_UNKNOWN\nB\n#else\nC\n#endif\"\"\"\n    output = r\"\"\"#define ALSO_UNKNOWN 0\n#if UNKNOWN\nA\n\n\n#else\nC\n#endif\n\"\"\"\n\nclass test6(unittest.TestCase, runner):\n    input = r\"\"\"#define UNKNOWN 1\n#if UNKNOWN\nA\n#elif ALSO_UNKNOWN\nB\n#else\nC\n#endif\"\"\"\n    output = r\"\"\"#define UNKNOWN 1\n\nA\n\"\"\"\n\nclass test7(unittest.TestCase, runner):\n    input = r\"\"\"#define UNKNOWN 0\n#if UNKNOWN\nA\n#elif ALSO_UNKNOWN\nB\n#else\nC\n#endif\"\"\"\n    output = r\"\"\"#define UNKNOWN 0\n\n\n#if ALSO_UNKNOWN\nB\n#else\nC\n#endif\n\"\"\"\n\nclass test8(unittest.TestCase, runner):\n    input = r\"\"\"#define UNKNOWN 0\n#if UNKNOWN\n#if 1\nA\n#else\nAA\n#endif\n#elif ALSO_UNKNOWN\n#if 1\nB\n#else\nBB\n#endif\n#else\n#if 1\nC\n#else\nCC\n#endif\n#endif\"\"\"\n    output = r\"\"\"#define UNKNOWN 
0\n\n\n\n\n\n\n#if ALSO_UNKNOWN\n\nB\n\n\n\n#else\n\nC\n\n\n\n#endif\n\"\"\"\n\nclass test9(unittest.TestCase, runner):\n    input = r\"\"\"#define KNOWN 0\n#if defined(UNKNOWN) || KNOWN\nA\n#endif\n\"\"\"\n    output = r\"\"\"#define KNOWN 0\n#if defined(UNKNOWN) || 0\nA\n#endif\n\"\"\"\n\nclass test10(unittest.TestCase, runner):\n    input = r\"\"\"#if !defined(__cpp_constexpr)\n#if __cplusplus >= 201402L\n#define __cpp_constexpr 201304  // relaxed constexpr\n#else\n#define __cpp_constexpr 190000\n#endif\n#endif\n\"\"\"\n    output = r\"\"\"#if !defined(__cpp_constexpr)\n#if __cplusplus >= 201402L\n#define __cpp_constexpr 201304  // relaxed constexpr\n#else\n#define __cpp_constexpr 190000\n#endif\n#endif\n\"\"\"\n\nclass test11(unittest.TestCase, runner):\n    input = r\"\"\"#define __cpp_constexpr 201304\n#if !defined(__cpp_constexpr)\n#if __cplusplus >= 201402L\n#define __cpp_constexpr 201304  // relaxed constexpr\n#else\n#define __cpp_constexpr 190000\n#endif\n#endif\n#ifndef BOOSTLITE_CONSTEXPR\n#if __cpp_constexpr >= 201304\n#define BOOSTLITE_CONSTEXPR constexpr\n#endif\n#endif\n#ifndef BOOSTLITE_CONSTEXPR\n#define BOOSTLITE_CONSTEXPR\n#endif\n\"\"\"\n    output = r\"\"\"#define __cpp_constexpr 201304\n#line 9\n#ifndef BOOSTLITE_CONSTEXPR\n\n#define BOOSTLITE_CONSTEXPR constexpr\n\n#endif\n\"\"\"\n\nclass test12(unittest.TestCase, runner):\n    input = r\"\"\"\n#define BOOST_OUTCOME_DISABLE_PREPROCESSED_INTERFACE_FILE\n\n#ifndef BOOST_OUTCOME_DISABLE_PREPROCESSED_INTERFACE_FILE\n\n#else\n\n#if defined(_MSC_VER) && !defined(__clang__)\n#define BOOST_OUTCOME_HEADERS_PATH2 BOOST_OUTCOME_VERSION_GLUE(v, BOOST_OUTCOME_HEADERS_VERSION, /monad.hpp)\n#elif 1\n#define BOOST_OUTCOME_HEADERS_PATH2 BOOST_OUTCOME_VERSION_GLUE(v, BOOST_OUTCOME_HEADERS_VERSION,)/monad.hpp\n#endif\n\n#endif\n\"\"\"\n    output = r\"\"\"\n#define BOOST_OUTCOME_DISABLE_PREPROCESSED_INTERFACE_FILE\n\n\n\n\n\n#if defined(_MSC_VER) && !defined(__clang__)\n#define BOOST_OUTCOME_HEADERS_PATH2 
BOOST_OUTCOME_VERSION_GLUE(v, BOOST_OUTCOME_HEADERS_VERSION, /monad.hpp)\n#elif 1\n#define BOOST_OUTCOME_HEADERS_PATH2 BOOST_OUTCOME_VERSION_GLUE(v, BOOST_OUTCOME_HEADERS_VERSION,)/monad.hpp\n#endif\n\"\"\"\n\nclass test18(unittest.TestCase, runner):\n    input = r\"\"\"\n/*\nmultiline\ncomment\n*/\n\nvoid shouldBeOnLineSeven();\n\"\"\"\n    output = r\"\"\"\n/*\nmultiline\ncomment\n*/\n\nvoid shouldBeOnLineSeven();\n\"\"\"\n\nclass test19(unittest.TestCase, runner):\n    input = r\"\"\"\n/*\na\ncomment\nthat\nspans\neight\nlines\n*/\n\nvoid shouldBeOnLineEleven();\"\"\"\n    output = r\"\"\"\n/*\na\ncomment\nthat\nspans\neight\nlines\n*/\n\nvoid shouldBeOnLineEleven();\n\"\"\"\n\nclass test20(unittest.TestCase, runner):\n    input = r\"\"\"\n#include ASIO_CUSTOM_HANDLER_TRACKING\n\"\"\"\n    output = r\"\"\"\n#include ASIO_CUSTOM_HANDLER_TRACKING\n\"\"\"\n\nclass test21(unittest.TestCase, runner):\n    input = r\"\"\"\n#if !FOO(5)\nhi\n#endif\n\"\"\"\n    output = r\"\"\"\n#if !FOO(5)\nhi\n#endif\n\"\"\"\n\nclass test22(unittest.TestCase, runner):\n    input = r\"\"\"\n#if !__has_include(<variant>)\nhi\n#endif\n\"\"\"\n    output = r\"\"\"\n#if !__has_include(<variant>)\nhi\n#endif\n\"\"\"\n\nclass test23(unittest.TestCase, runner):\n    input = r\"\"\"\n#if 0  // Do NOT enable weakened implicit construction for these types\nhi\n#endif\n\"\"\"\n    output = r\"\"\"\"\"\"\n\n\nif __name__ == '__main__':\n    unittest.main()\n    "
  },
  {
    "path": "tests/test-c/LICENSE",
    "content": "/*-\n * Copyright (c) 1998, 2002-2008 Kiyoshi Matsui <kmatsui@t3.rim.or.jp>\n * All rights reserved.\n *\n * This software including the files in this directory is provided under\n * the following license.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n * 1. Redistributions of source code must retain the above copyright\n *    notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n * SUCH DAMAGE.\n */\n\n"
  },
  {
    "path": "tests/test-c/defs.h",
    "content": "/* defs.h   */\n\n#ifndef NO_SYSTEM_HEADERS\n#include    <stdio.h>\n\n/* assert(): Enable one of these three. */\n/* Note: This source doesn't use #elif directive to test preprocessor which\n        can't recognize the directive.  */\n#if     1   /* For the translator which can process <assert.h> properly.    */\n#include    <assert.h>\n#else\n#if     0   /* Not to abort on error.   */\n#define     assert( exp)    (exp) ? (void)0 : (void) fprintf( stderr,   \\\n        \"Assertion failed: %s, from line %d of file %s\\n\",  \\\n        # exp, __LINE__, __FILE__)\n#endif\n#endif\n\nextern int      strcmp( const char *, const char *);\nextern size_t   strlen( const char *);\nextern void     exit( int);\n\n#endif"
  },
  {
    "path": "tests/test-c/e_12_8.c",
    "content": "/* e_12_8.c:    Out of range of integer pp-token in #if expression. */\n\n/* 12.8:    Preprocessing number perhaps out of range of unsigned long. */\n#if     123456789012345678901\n#endif\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_14.c",
    "content": "/* e_14.c:  Illegal #if expressions.    */\n\n#define A   1\n#define B   1\n\n/* 14.1:    String literal is not allowed in #if expression.    */\n#if     \"string\"\n#endif      /* The second error ?   */\n\n/* 14.2:    Operators =, +=, ++, etc. are not allowed in #if expression.    */\n#if     A = B\n#endif\n#if     A++ B\n#endif\n#if     A --B\n#endif\n#if     A.B\n#endif\n\n/* 14.3:    Unterminated #if expression.    */\n#if     0 <\n#endif\n#if     ( (A == B)\n#endif\n\n/* 14.4:    Unbalanced parenthesis in #if defined operator. */\n#if     defined ( MACRO\n#endif\n\n/* 14.5:    No argument.    */\n#if\n#endif\n\n/* 14.6:    Macro expanding to 0 token in #if expression.   */\n#define ZERO_TOKEN\n#if     ZERO_TOKEN\n#endif\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_14_10.c",
    "content": "/* e_14_10.c:   Overflow of constant expression in #if directive.   */\n\n/* 14.10:   */\n#include    <limits.h>\n\n#if     LONG_MAX - LONG_MIN\n#endif\n#if     LONG_MAX + 1\n#endif\n#if     LONG_MIN - 1\n#endif\n#if     LONG_MAX * 2\n#endif\n\nint main( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_14_7.c",
    "content": "/* e_14_7.c:    There is no keyword in #if expression.  */\n\n/* 14.7:    sizeof operator is disallowed.  */\n/*  Evaluated as: 0 (0)\n    Constant expression syntax error.   */\n#if     sizeof (int)\n#endif\n\n/* 14.8:    type cast is disallowed.    */\n/*  Evaluated as: (0)0x8000\n    Also a constant expression error.   */\n#if     (int)0x8000 < 0\n#endif\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_14_9.c",
    "content": "/* e_14_9.c:    Out of range in #if expression (division by 0). */\n\n/* 14.9:    Divided by 0.   */\n#if     1 / 0\n#endif\n\nint main( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_15_3.c",
    "content": "/* e_15_3.c:    #ifdef, #ifndef syntax errors.  */\n\n/* 15.3:    Not an identifier.  */\n#ifdef  \"string\"\n#endif\n#ifdef  123\n#endif\n\n/* 15.4:    Excessive token sequence.   */\n#ifdef  MACRO   Junk\n#endif\n\n/* 15.5:    No argument.    */\n#ifndef\n#endif\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_16.c",
    "content": "/* e_16.c:  Trailing junk of #else, #endif. */\n\n/* 16.1:    Trailing junk of #else. */\n#define MACRO_0     0\n#if     MACRO_0\n#else   MACRO_0\n\n/* 16.2:    Trailing junk of #endif.    */\n#endif  MACRO_0\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_17.c",
    "content": "/* e_17.c:  Ill-formed group in a source file.  */\n\n#define MACRO_1     1\n\n/* 17.1:    Error of #endif without #if.    */\n#endif\n\n/* 17.2:    Error of #else without #if. */\n#else\n\n/* 17.3:    Error of #else after #else. */\n#if     MACRO_1\n#else\n#else\n#endif\n\n/* 17.4:    Error of #elif after #else. */\n#if     MACRO_1 == 1\n#else\n#elif   MACRO_1 == 0\n#endif\n\n/* 17.5:    Error of #endif without #if in an included file.    */\n#if     1\n#include    \"unbal1.h\"\n\n/* 17.6:    Error of unterminated #if section in an included file.  */\n#include    \"unbal2.h\"\n#endif\n\n/* 17.7:    Error of unterminated #if section.  */\n#if     MACRO_1 == 0\n#else\n\n"
  },
  {
    "path": "tests/test-c/e_18_4.c",
    "content": "/* e_18_4.c:    #define syntax errors.  */\n\n/* 18.4:    Not an identifier.  */\n#define \"string\"\n#define 123\n\n/* 18.5:    No argument.    */\n#define\n\n/* 18.6:    Empty parameter list.   */\n#define math( op, a, )      op( (a), (b))\n\n/* 18.7:    Duplicate parameter names.  */\n#define math( op, a, a)     op( (a), (b))\n\n/* 18.8:    Argument is not an identifier.  */\n#define NUMARGS( 1, +, 2)   (1 + 2)\n\n/* 18.9:    No space between macro name and replacement text.   */\n/*\n    C90 (Corrigendum 1) forbids this if and only the replacement text begins\n        with a non-basic-character.\n    C99 forbids this even when the replacement text begins with basic-\n        character.\n*/\n/*  From ISO 9899:1990 / Corrigendum 1. */\n#define THIS$AND$THAT(a, b)     ((a) + (b))\n/* Note: the following definition is legal (object-like macro).\n#define THIS $AND$THAT(a, b)    ((a) + (b))\n*/\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_19_3.c",
    "content": "/* e_19_3.c:    Redefinitions of macros.    */\n\n#include    \"defs.h\"\n#define     str( s)     # s\n#define     xstr( s)    str( s)\n\n/* Excerpts from ISO C 6.8.3 \"Examples\".    */\n\n#define OBJ_LIKE        (1-1)\n#define FTN_LIKE(a)     ( a )\n\n/* The following redefinitions should be diagnosed. */\n\n/* 19.3:    */\n#define OBJ_LIKE        (0)     /* different token sequence     */\n\n/* 19.4:    */\n#undef  OBJ_LIKE\n#define OBJ_LIKE        (1-1)\n#define OBJ_LIKE        (1 - 1) /* different white space        */\n\n/* 19.5:    */\n#define FTN_LIKE(b)     ( a )   /* different parameter usage    */\n\n/* 19.6:    */\n#undef  FTN_LIKE\n#define FTN_LIKE(a)     ( a )\n#define FTN_LIKE(b)     ( b )   /* different parameter spelling */\n\n/* 19.7:    Not in ISO C \"Examples\" */\n#define FTN_LIKE        OBJ_LIKE\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_23_3.c",
    "content": "/* e_23_3.c:    ## operator shall not occur at the beginning or at the end of\n        replacement list for either form of macro definition.   */\n\n/* 23.3:    In object-like macro.   */\n#define con     ## name\n#define cat     12 ##\n\n/* 23.4:    In function-like macro. */\n#define CON( a, b)  ## a ## b\n#define CAT( b, c)  b ## c ##\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_24_6.c",
    "content": "/* e_24_6.c:    Operand of # operator in function-like macro definition should\n        be a parameter. */\n\n/* 24.6:    */\n#define FUNC( a)    # b\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_25_6.c",
    "content": "/* e_25_6.c:    Macro arguments are pre-expanded separately.    */\n\n/* 25.6:    */\n#define sub( x, y)      (x - y)\n#define head            sub(\n#define body(x,y)       x,y\n#define tail            )\n#define head_body_tail( a, b, c)    a b c\n/* \"head\" is once expanded to \"sub(\", then rescanning of \"sub(\" causes an\n        uncompleted macro call.  Expansion of an argument should complete\n        within the argument.    */\n    head_body_tail( head, body(a,b), tail);\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_27_7.c",
    "content": "/* e_27_7.c:    Error of rescanning.    */\n\n#define sub( x, y)      (x - y)\n\n/* 27.7:    */\n#define TWO_TOKENS      a,b\n#define SUB( x, y)      sub( x, y)\n/* Too many arguments error while rescanning after once replaced to:\n    sub( a,b, 1);   */\n    SUB( TWO_TOKENS, 1);\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_29_3.c",
    "content": "/* e_29_3.c:    #undef errors.  */\n\n/* 29.3:    Not an identifier.  */\n#undef  \"string\"\n#undef  123\n\n/* 29.4:    Excessive token sequence.   */\n#undef  MACRO_0     Junk\n\n/* 29.5:    No argument.    */\n#undef\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_31.c",
    "content": "/* e_31.c:  Illegal macro calls.    */\n\n#define sub( x, y)      (x - y)\n\n/* 31.1:    Too many arguments error.   */\n    sub( x, y, z);\n\n/* 31.2:    Too few arguments error.    */\n    sub( x);\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_31_3.c",
    "content": "/* e_31_3.c:    Macro call in control line should complete in the line. */\n\n#define glue( a, b)     a ## b\n#define str( s)         # s\n#define xstr( s)        str( s)\n\n/* 31.3:    Unterminated macro call.    */\n#include    xstr( glue( header,\n    .h))\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_32_5.c",
    "content": "/* e_32_5.c:    Range error of character constant.  */\n\n/* 32.5:    Value of a numerical escape sequence in character constant should\n        be in the range of char.    */\n#if     '\\x123' == 0x123        /* Out of range.    */\n#endif\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_33_2.c",
    "content": "/* e_33_2.c:    Out of range of numerical escape sequence in wide-char. */\n\n/* 33.2:    Value of a numerical escape sequence in wide-character constant\n        should be in the range of wchar_t.  */\n#if     L'\\xabcdef012' == 0xbcdef012        /* Perhaps out of range.    */\n#endif\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_35_2.c",
    "content": "/* e_35_2.c:    Out of range of character constant. */\n\n/* In ASCII character set.  */\n/* 35.2:    */\n#if     'abcdefghi' == '\\x61\\x62\\x63\\x64\\x65\\x66\\x67\\x68\\x69'\n        /* Perhaps out of range.    */\n#endif\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_4_3.c",
    "content": "/* e_4_3.c:     Illegal pp-token.   */\n\n/* 4.3:     Empty character constant is an error.   */\n#if     '' == 0     /* This line is invalid, maybe skipped. */\n#endif              /* This line maybe the second error.    */\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_7_4.c",
    "content": "/* e_7_4.c:     #line error.    */\n\n/* 7.4:     string literal in #line directive shall be a character string\n        literal.    */\n\n#line   123     L\"wide\"\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/e_std.c",
    "content": "/*\n *      e_std.c\n *\n * 1998/08      made public                                     kmatsui\n * 2002/08      revised not to conflict with C99 Standard       kmatsui\n * 2003/11      added a few samples                             kmatsui\n *\n * Samples to test Standard C preprocessing.\n * Preprocessor must diagnose all of these samples appropriately.\n */\n\n\nvoid    e_19_3( void);\nvoid    e_25_6( void);\nvoid    e_27_6( void);\nvoid    e_31( void);\n\nmain( void)\n{\n    e_19_3();\n    e_25_6();\n    e_27_6();\n    e_31();\n\n    return  0;\n}\n\n\n/*      Illegal pp-token.   */\n\n/* 4.3:     Empty character constant is an error.   */\n#if     '' == 0     /* This line is invalid, maybe skipped. */\n#endif              /* This line maybe the second error.    */\n\n\n/*      #line error.    */\n\n/* 7.4:     string literal in #line directive shall be a character string\n    literal.    */\n\n#line   123     L\"wide\"\n\n\n/*      Out of range of integer pp-token in #if expression. */\n/* Note:    Tests of character constant overflow are in 32.5, 33.2, 35.2.   */\n\n/* 12.8:    Preprocessing number perhaps out of range of unsigned long. */\n#if     123456789012345678901\n#endif\n\n\n/*      Illegal #if expressions.    */\n\n#define A   1\n#define B   1\n\n/* 14.1:    String literal is not allowed in #if expression.    */\n#if     \"string\"\n#endif      /* The second error ?   */\n\n/* 14.2:    Operators =, +=, ++, etc. are not allowed in #if expression.    */\n#if     A = B\n#endif\n#if     A++ B\n#endif\n#if     A --B\n#endif\n#if     A.B\n#endif\n\n/* 14.3:    Unterminated #if expression.    */\n#if     0 <\n#endif\n#if     ( (A == B)\n#endif\n\n/* 14.4:    Unbalanced parenthesis in #if defined operator. */\n#if     defined ( MACRO\n#endif\n\n/* 14.5:    No argument.    */\n#if\n#endif\n\n/* 14.6:    Macro expanding to 0 token in #if expression.   
*/\n#define ZERO_TOKEN\n#if     ZERO_TOKEN\n#endif\n\n\n/*      There is no keyword in #if expression.  */\n\n/* 14.7:    sizeof operator is disallowed.  */\n/*  Evaluated as: 0 (0)\n    Constant expression syntax error.   */\n#if     sizeof (int)\n#endif\n\n/* 14.8:    type cast is disallowed.    */\n/*  Evaluated as: (0)0x8000\n    Also a constant expression error.   */\n#if     (int)0x8000 < 0\n#endif\n\n\n/*      Out of range in #if expression (division by 0). */\n\n/* 14.9:    Divided by 0.   */\n#if     1 / 0\n#endif\n\n\n/*      Overflow of constant expression in #if directive.   */\n\n/* 14.10:   */\n#include    <limits.h>\n\n#if     LONG_MAX - LONG_MIN\n#endif\n#if     LONG_MAX + 1\n#endif\n#if     LONG_MIN - 1\n#endif\n#if     LONG_MAX * 2\n#endif\n\n\n/*      #ifdef, #ifndef syntax errors.  */\n\n/* 15.3:    Not an identifier.  */\n#ifdef  \"string\"\n#endif\n#ifdef  123\n#endif\n\n/* 15.4:    Excessive token sequence.   */\n#ifdef  A   Junk\n#endif\n\n/* 15.5:    No argument.    */\n#ifndef\n#endif\n\n\n/*      Trailing junk of #else, #endif. */\n\n/* 16.1:    Trailing junk of #else. */\n#define MACRO_0     0\n#if     MACRO_0\n#else   MACRO_0\n\n/* 16.2:    Trailing junk of #endif.    */\n#endif  MACRO_0\n\n\n/*      Ill-formed group in a source file.  */\n\n#define MACRO_1     1\n\n/* 17.1:    Error of #endif without #if.    */\n#endif\n\n/* 17.2:    Error of #else without #if. */\n#else\n\n/* 17.3:    Error of #else after #else. */\n#if     MACRO_1\n#else                   /* line 168 */\n#if     1\n#else\n#endif\n#else\n#endif\n\n/* 17.4:    Error of #elif after #else. */\n#if     MACRO_1 == 1\n#else                   /* line 177 */\n#elif   MACRO_1 == 0\n#endif\n\n/* 17.5:    Error of #endif without #if in an included file.    */\n#if     1\n#include    \"unbal1.h\"\n\n/* 17.6:    Error of unterminated #if section in an included file.  */\n#include    \"unbal2.h\"\n#endif\n\n/* 17.7:    Error of unterminated #if section.  
*/\n/* An error would be reported at end of file.   */\n#if     MACRO_1 == 0    /* line 191 */\n#else\n\n\n/*      #define syntax errors.  */\n\n/* 18.4:    Not an identifier.  */\n#define \"string\"\n#define 123\n\n/* 18.5:    No argument.    */\n#define\n\n/* 18.6:    Empty parameter list.   */\n#define math( op, a, )      op( (a), (b))\n\n/* 18.7:    Duplicate parameter names.  */\n#define math( op, a, a)     op( (a), (b))\n\n/* 18.8:    Parameter is not an identifier. */\n#define NUMARGS( 1, +, 2)   (1 + 2)\n\n/* 18.9:    No space between macro name and replacement text.   */\n/*\n    C90 (Corrigendum 1) forbids this if and only the replacement text begins\n        with a non-basic-character.\n    C99 forbids this even when the replacement text begins with basic-\n        character.\n*/\n/*  From ISO 9899:1990 / Corrigendum 1. */\n#define THIS$AND$THAT(a, b)     ((a) + (b))\n/* Note: the following definition is legal (object-like macro).\n#define THIS $AND$THAT(a, b)    ((a) + (b))\n*/\n\n\n/*      Redefinitions of macros.    */\n\n#define     str( s)     # s\n#define     xstr( s)    str( s)\n\n/* Excerpts from ISO C 6.8.3 \"Examples\".    */\n\n#define OBJ_LIKE        (1-1)\n#define FTN_LIKE(a)     ( a )\n\nvoid    e_19_3( void)\n{\n/* The following redefinitions should be diagnosed. */\n\n/* 19.3:    */\n#define OBJ_LIKE        (0)     /* different token sequence     */\n\n/* 19.4:    */\n#undef  OBJ_LIKE\n#define OBJ_LIKE        (1-1)\n#define OBJ_LIKE        (1 - 1) /* different white space        */\n\n/* 19.5:    */\n#define FTN_LIKE(b)     ( a )   /* different parameter usage    */\n\n/* 19.6:    */\n#undef  FTN_LIKE\n#define FTN_LIKE(a)     ( a )\n#define FTN_LIKE(b)     ( b )   /* different parameter spelling */\n\n/* 19.7:    Not in ISO C \"Examples\" */\n#define FTN_LIKE        OBJ_LIKE\n}\n\n\n/*      ## operator shall not occur at the beginning or at the end of\n        replacement list for either form of macro definition.   
*/\n\n/* 23.3:    In object-like macro.   */\n#define con     ## name\n#define cat     12 ##\n\n/* 23.4:    In function-like macro. */\n#define CON( a, b)  ## a ## b\n#define CAT( b, c)  b ## c ##\n\n\n/*      Operand of # operator in function-like macro definition shall\n        be a parameter name.    */\n\n/* 24.6:    */\n#define FUNC( a)    # b\n\n\n/*      Macro arguments are pre-expanded separately.    */\n\n/* 25.6:    */\n#define sub( x, y)      (x - y)\n#define head            sub(\n#define body(x,y)       x,y\n#define tail            )\n#define head_body_tail( a, b, c)    a b c\n\nvoid    e_25_6( void)\n{\n/* \"head\" is once replaced to \"sub(\", then rescanning of \"sub(\" causes an\n        uncompleted macro call.  Expansion of an argument should complete\n        within the argument.    */\n    head_body_tail( head, body(a,b), tail);\n}\n\n\n/*      Error of rescanning.    */\n\n/* 27.7:    */\n#define TWO_ARGS        a,b\n#define SUB( x, y)      sub( x, y)\n\nvoid    e_27_7( void)\n{\n/* Too many arguments error while rescanning after once replaced to:\n    sub( a,b, 1);   */\n    SUB( TWO_ARGS, 1);\n}\n\n\n/*      #undef errors.  */\n\n/* 29.3:    Not an identifier.  */\n#undef  \"string\"\n#undef  123\n\n/* 29.4:    Excessive token sequence.   */\n#undef  MACRO_0     Junk\n\n/* 29.5:    No argument.    */\n#undef\n\n\n/*      Illegal macro calls.    */\n\nvoid    e_31( void)\n{\n    int     x = 1, y = 2;\n\n/* 31.1:    Too many arguments error.   */\n    sub( x, y, z);\n\n/* 31.2:    Too few arguments error.    */\n    sub( x);\n}\n\n\n/*      Macro call in control line should complete in the line. */\n\n#define glue( a, b)     a ## b\n\n/* 31.3:    Unterminated macro call.    */\n#include    xstr( glue( header,\n    .h))\n\n\n/*      Range error of character constant.  */\n\n/* 32.5:    Value of a numerical escape sequence in character constant should\n        be in the range of char.    */\n#if     '\\x123' == 0x123        /* Out of range.    
*/\n#endif\n\n\n/*      Out of range of numerical escape sequence in wide-char. */\n\n/* 33.2:    Value of a numerical escape sequence in wide-character constant\n        should be in the range of wchar_t.  */\n#if     L'\\xabcdef012' == 0xbcde012        /* Perhaps out of range.    */\n#endif\n\n\n/*      Out of range of character constant. */\n\n/* In ASCII character set.  */\n/* 35.2:    */\n#if     'abcdefghi' == '\\x61\\x62\\x63\\x64\\x65\\x66\\x67\\x68\\x69'\n        /* Perhaps out of range.    */\n#endif\n\n\n/* Error of \"unterminated #if section started at line 191\" will be reported\n    at end of file. */\n\n"
  },
  {
    "path": "tests/test-c/header.h",
    "content": "/* header.h */\n\n#define MACRO_abc   abc\n"
  },
  {
    "path": "tests/test-c/i_32_3.c",
    "content": "/* i_32_3.c:    Character constant in #if expression.   */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    fputs( \"started\\n\", stderr);\n\n/* 32.3:    */\n#if     'a' != 0x61\n    fputs( \"Not ASCII character set, or bad evaluation of character constant.\"\n            , stderr);\n    exit( 1);\n#endif\n\n/* 32.4:    '\\a' and '\\v'   */\n#if     '\\a' != 7 || '\\v' != 11\n    fputs( \"Not ASCII character set, or bad evaluation of escape sequences.\"\n            , stderr);\n    exit( 1);\n#endif\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/i_35.c",
    "content": "/* i_35.c:  Multi-character character constant. */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    fputs( \"started\\n\", stderr);\n\n/* In ASCII character set.  */\n/* 35.1:    */\n#if     ('ab' != '\\x61\\x62') || ('\\aa' != '\\7\\x61')\n    fputs( \"Bad handling of multi-character character constant.\\n\", stderr);\n    exit( 1);\n#endif\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/i_35_3.c",
    "content": "/* i_35_3.c:    Multi-character wide character constant.    */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    fputs( \"started\\n\", stderr);\n\n/* In ASCII character set.  */\n/* 35.3:    */\n#if     (L'ab' != L'\\x61\\x62') || (L'ab' == 'ab')\n    fputs( \"Bad handling of multi-character wide character constant.\\n\",\n            stderr);\n    exit( 1);\n#endif\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n"
  },
  {
    "path": "tests/test-c/ifdef15.h",
    "content": "/* ifdef15.h    */\n\n#ifdef  X01\n#else\n#ifdef  X02\n#else\n#ifdef  X03\n#else\n#ifdef  X04\n#else\n#ifdef  X05\n#else\n#ifdef  X06\n#else\n#ifdef  X07\n#else\n#ifdef  X08\n#else\n#ifdef  X09\n#else\n#ifdef  X0A\n#else\n#ifdef  X0B\n#else\n#ifdef  X0C\n#else\n#ifdef  X0D\n#else\n#ifdef  X0E\n#else\n#ifdef  X0F\n    nest = 0x0f;\n#endif  /* X0F  */\n#endif  /* X0E  */\n#endif  /* X0D  */\n#endif  /* X0C  */\n#endif  /* X0B  */\n#endif  /* X0A  */\n#endif  /* X09  */\n#endif  /* X08  */\n#endif  /* X07  */\n#endif  /* X06  */\n#endif  /* X05  */\n#endif  /* X04  */\n#endif  /* X03  */\n#endif  /* X02  */\n#endif  /* X01  */\n\n"
  },
  {
    "path": "tests/test-c/line.h",
    "content": "/* line.h   */\n\n{\n    char *  file = __FILE__;\n    file += strlen( file) - 6;\n    assert( __LINE__ == 6 && strcmp( file, \"line.h\") == 0);\n}\n"
  },
  {
    "path": "tests/test-c/m1024.h",
    "content": "/* m1024.h  */\n\n#define AA\n#define AB\n#define AC\n#define AD\n#define AE\n#define AF\n#define AG\n#define AH\n#define AI\n#define AJ\n#define AK\n#define AL\n#define AM\n#define AN\n#define AO\n#define AP\n#define AQ\n#define AR\n#define AS\n#define AT\n#define AU\n#define AV\n#define AW\n#define AX\n#define AY\n#define AZ\n#define Aa\n#define Ab\n#define Ac\n#define Ad\n#define Ae\n#define Af\n#define Ag\n#define Ah\n#define Ai\n#define Aj\n#define Ak\n#define Al\n#define Am\n#define An\n#define BA\n#define BB\n#define BC\n#define BD\n#define BE\n#define BF\n#define BG\n#define BH\n#define BI\n#define BJ\n#define BK\n#define BL\n#define BM\n#define BN\n#define BO\n#define BP\n#define BQ\n#define BR\n#define BS\n#define BT\n#define BU\n#define BV\n#define BW\n#define BX\n#define BY\n#define BZ\n#define Ba\n#define Bb\n#define Bc\n#define Bd\n#define Be\n#define Bf\n#define Bg\n#define Bh\n#define Bi\n#define Bj\n#define Bk\n#define Bl\n#define Bm\n#define Bn\n#define CA\n#define CB\n#define CC\n#define CD\n#define CE\n#define CF\n#define CG\n#define CH\n#define CI\n#define CJ\n#define CK\n#define CL\n#define CM\n#define CN\n#define CO\n#define CP\n#define CQ\n#define CR\n#define CS\n#define CT\n#define CU\n#define CV\n#define CW\n#define CX\n#define CY\n#define CZ\n#define Ca\n#define Cb\n#define Cc\n#define Cd\n#define Ce\n#define Cf\n#define Cg\n#define Ch\n#define Ci\n#define Cj\n#define Ck\n#define Cl\n#define Cm\n#define Cn\n#define DA\n#define DB\n#define DC\n#define DD\n#define DE\n#define DF\n#define DG\n#define DH\n#define DI\n#define DJ\n#define DK\n#define DL\n#define DM\n#define DN\n#define DO\n#define DP\n#define DQ\n#define DR\n#define DS\n#define DT\n#define DU\n#define DV\n#define DW\n#define DX\n#define DY\n#define DZ\n#define Da\n#define Db\n#define Dc\n#define Dd\n#define De\n#define Df\n#define Dg\n#define Dh\n#define Di\n#define Dj\n#define Dk\n#define Dl\n#define Dm\n#define Dn\n#define EA\n#define EB\n#define EC\n#define 
ED\n#define EE\n#define EF\n#define EG\n#define EH\n#define EI\n#define EJ\n#define EK\n#define EL\n#define EM\n#define EN\n#define EO\n#define EP\n#define EQ\n#define ER\n#define ES\n#define ET\n#define EU\n#define EV\n#define EW\n#define EX\n#define EY\n#define EZ\n#define Ea\n#define Eb\n#define Ec\n#define Ed\n#define Ee\n#define Ef\n#define Eg\n#define Eh\n#define Ei\n#define Ej\n#define Ek\n#define El\n#define Em\n#define En\n#define FA\n#define FB\n#define FC\n#define FD\n#define FE\n#define FF\n#define FG\n#define FH\n#define FI\n#define FJ\n#define FK\n#define FL\n#define FM\n#define FN\n#define FO\n#define FP\n#define FQ\n#define FR\n#define FS\n#define FT\n#define FU\n#define FV\n#define FW\n#define FX\n#define FY\n#define FZ\n#define Fa\n#define Fb\n#define Fc\n#define Fd\n#define Fe\n#define Ff\n#define Fg\n#define Fh\n#define Fi\n#define Fj\n#define Fk\n#define Fl\n#define Fm\n#define Fn\n#define GA\n#define GB\n#define GC\n#define GD\n#define GE\n#define GF\n#define GG\n#define GH\n#define GI\n#define GJ\n#define GK\n#define GL\n#define GM\n#define GN\n#define GO\n#define GP\n#define GQ\n#define GR\n#define GS\n#define GT\n#define GU\n#define GV\n#define GW\n#define GX\n#define GY\n#define GZ\n#define Ga\n#define Gb\n#define Gc\n#define Gd\n#define Ge\n#define Gf\n#define Gg\n#define Gh\n#define Gi\n#define Gj\n#define Gk\n#define Gl\n#define Gm\n#define Gn\n#define HA\n#define HB\n#define HC\n#define HD\n#define HE\n#define HF\n#define HG\n#define HH\n#define HI\n#define HJ\n#define HK\n#define HL\n#define HM\n#define HN\n#define HO\n#define HP\n#define HQ\n#define HR\n#define HS\n#define HT\n#define HU\n#define HV\n#define HW\n#define HX\n#define HY\n#define HZ\n#define Ha\n#define Hb\n#define Hc\n#define Hd\n#define He\n#define Hf\n#define Hg\n#define Hh\n#define Hi\n#define Hj\n#define Hk\n#define Hl\n#define Hm\n#define Hn\n#define IA\n#define IB\n#define IC\n#define ID\n#define IE\n#define IF\n#define IG\n#define IH\n#define II\n#define 
IJ\n#define IK\n#define IL\n#define IM\n#define IN\n#define IO\n#define IP\n#define IQ\n#define IR\n#define IS\n#define IT\n#define IU\n#define IV\n#define IW\n#define IX\n#define IY\n#define IZ\n#define Ia\n#define Ib\n#define Ic\n#define Id\n#define Ie\n#define If\n#define Ig\n#define Ih\n#define Ii\n#define Ij\n#define Ik\n#define Il\n#define Im\n#define In\n#define JA\n#define JB\n#define JC\n#define JD\n#define JE\n#define JF\n#define JG\n#define JH\n#define JI\n#define JJ\n#define JK\n#define JL\n#define JM\n#define JN\n#define JO\n#define JP\n#define JQ\n#define JR\n#define JS\n#define JT\n#define JU\n#define JV\n#define JW\n#define JX\n#define JY\n#define JZ\n#define Ja\n#define Jb\n#define Jc\n#define Jd\n#define Je\n#define Jf\n#define Jg\n#define Jh\n#define Ji\n#define Jj\n#define Jk\n#define Jl\n#define Jm\n#define Jn\n#define KA\n#define KB\n#define KC\n#define KD\n#define KE\n#define KF\n#define KG\n#define KH\n#define KI\n#define KJ\n#define KK\n#define KL\n#define KM\n#define KN\n#define KO\n#define KP\n#define KQ\n#define KR\n#define KS\n#define KT\n#define KU\n#define KV\n#define KW\n#define KX\n#define KY\n#define KZ\n#define Ka\n#define Kb\n#define Kc\n#define Kd\n#define Ke\n#define Kf\n#define Kg\n#define Kh\n#define Ki\n#define Kj\n#define Kk\n#define Kl\n#define Km\n#define Kn\n#define LA\n#define LB\n#define LC\n#define LD\n#define LE\n#define LF\n#define LG\n#define LH\n#define LI\n#define LJ\n#define LK\n#define LL\n#define LM\n#define LN\n#define LO\n#define LP\n#define LQ\n#define LR\n#define LS\n#define LT\n#define LU\n#define LV\n#define LW\n#define LX\n#define LY\n#define LZ\n#define La\n#define Lb\n#define Lc\n#define Ld\n#define Le\n#define Lf\n#define Lg\n#define Lh\n#define Li\n#define Lj\n#define Lk\n#define Ll\n#define Lm\n#define Ln\n#define MA\n#define MB\n#define MC\n#define MD\n#define ME\n#define MF\n#define MG\n#define MH\n#define MI\n#define MJ\n#define MK\n#define ML\n#define MM\n#define MN\n#define MO\n#define 
MP\n#define MQ\n#define MR\n#define MS\n#define MT\n#define MU\n#define MV\n#define MW\n#define MX\n#define MY\n#define MZ\n#define Ma\n#define Mb\n#define Mc\n#define Md\n#define Me\n#define Mf\n#define Mg\n#define Mh\n#define Mi\n#define Mj\n#define Mk\n#define Ml\n#define Mm\n#define Mn\n#define NA\n#define NB\n#define NC\n#define ND\n#define NE\n#define NF\n#define NG\n#define NH\n#define NI\n#define NJ\n#define NK\n#define NL\n#define NM\n#define NN\n#define NO\n#define NP\n#define NQ\n#define NR\n#define NS\n#define NT\n#define NU\n#define NV\n#define NW\n#define NX\n#define NY\n#define NZ\n#define Na\n#define Nb\n#define Nc\n#define Nd\n#define Ne\n#define Nf\n#define Ng\n#define Nh\n#define Ni\n#define Nj\n#define Nk\n#define Nl\n#define Nm\n#define Nn\n#define OA\n#define OB\n#define OC\n#define OD\n#define OE\n#define OF\n#define OG\n#define OH\n#define OI\n#define OJ\n#define OK\n#define OL\n#define OM\n#define ON\n#define OO\n#define OP\n#define OQ\n#define OR\n#define OS\n#define OT\n#define OU\n#define OV\n#define OW\n#define OX\n#define OY\n#define OZ\n#define Oa\n#define Ob\n#define Oc\n#define Od\n#define Oe\n#define Of\n#define Og\n#define Oh\n#define Oi\n#define Oj\n#define Ok\n#define Ol\n#define Om\n#define On\n#define PA\n#define PB\n#define PC\n#define PD\n#define PE\n#define PF\n#define PG\n#define PH\n#define PI\n#define PJ\n#define PK\n#define PL\n#define PM\n#define PN\n#define PO\n#define PP\n#define PQ\n#define PR\n#define PS\n#define PT\n#define PU\n#define PV\n#define PW\n#define PX\n#define PY\n#define PZ\n#define Pa\n#define Pb\n#define Pc\n#define Pd\n#define Pe\n#define Pf\n#define Pg\n#define Ph\n#define Pi\n#define Pj\n#define Pk\n#define Pl\n#define Pm\n#define Pn\n#define QA\n#define QB\n#define QC\n#define QD\n#define QE\n#define QF\n#define QG\n#define QH\n#define QI\n#define QJ\n#define QK\n#define QL\n#define QM\n#define QN\n#define QO\n#define QP\n#define QQ\n#define QR\n#define QS\n#define QT\n#define QU\n#define 
QV\n#define QW\n#define QX\n#define QY\n#define QZ\n#define Qa\n#define Qb\n#define Qc\n#define Qd\n#define Qe\n#define Qf\n#define Qg\n#define Qh\n#define Qi\n#define Qj\n#define Qk\n#define Ql\n#define Qm\n#define Qn\n#define RA\n#define RB\n#define RC\n#define RD\n#define RE\n#define RF\n#define RG\n#define RH\n#define RI\n#define RJ\n#define RK\n#define RL\n#define RM\n#define RN\n#define RO\n#define RP\n#define RQ\n#define RR\n#define RS\n#define RT\n#define RU\n#define RV\n#define RW\n#define RX\n#define RY\n#define RZ\n#define Ra\n#define Rb\n#define Rc\n#define Rd\n#define Re\n#define Rf\n#define Rg\n#define Rh\n#define Ri\n#define Rj\n#define Rk\n#define Rl\n#define Rm\n#define Rn\n#define SA\n#define SB\n#define SC\n#define SD\n#define SE\n#define SF\n#define SG\n#define SH\n#define SI\n#define SJ\n#define SK\n#define SL\n#define SM\n#define SN\n#define SO\n#define SP\n#define SQ\n#define SR\n#define SS\n#define ST\n#define SU\n#define SV\n#define SW\n#define SX\n#define SY\n#define SZ\n#define Sa\n#define Sb\n#define Sc\n#define Sd\n#define Se\n#define Sf\n#define Sg\n#define Sh\n#define Si\n#define Sj\n#define Sk\n#define Sl\n#define Sm\n#define Sn\n#define TA\n#define TB\n#define TC\n#define TD\n#define TE\n#define TF\n#define TG\n#define TH\n#define TI\n#define TJ\n#define TK\n#define TL\n#define TM\n#define TN\n#define TO\n#define TP\n#define TQ\n#define TR\n#define TS\n#define TT\n#define TU\n#define TV\n#define TW\n#define TX\n#define TY\n#define TZ\n#define Ta\n#define Tb\n#define Tc\n#define Td\n#define Te\n#define Tf\n#define Tg\n#define Th\n#define Ti\n#define Tj\n#define Tk\n#define Tl\n#define Tm\n#define Tn\n#define UA\n#define UB\n#define UC\n#define UD\n#define UE\n#define UF\n#define UG\n#define UH\n#define UI\n#define UJ\n#define UK\n#define UL\n#define UM\n#define UN\n#define UO\n#define UP\n#define UQ\n#define UR\n#define US\n#define UT\n#define UU\n#define UV\n#define UW\n#define UX\n#define UY\n#define UZ\n#define Ua\n#define 
Ub\n#define Uc\n#define Ud\n#define Ue\n#define Uf\n#define Ug\n#define Uh\n#define Ui\n#define Uj\n#define Uk\n#define Ul\n#define Um\n#define Un\n#define VA\n#define VB\n#define VC\n#define VD\n#define VE\n#define VF\n#define VG\n#define VH\n#define VI\n#define VJ\n#define VK\n#define VL\n#define VM\n#define VN\n#define VO\n#define VP\n#define VQ\n#define VR\n#define VS\n#define VT\n#define VU\n#define VV\n#define VW\n#define VX\n#define VY\n#define VZ\n#define Va\n#define Vb\n#define Vc\n#define Vd\n#define Ve\n#define Vf\n#define Vg\n#define Vh\n#define Vi\n#define Vj\n#define Vk\n#define Vl\n#define Vm\n#define Vn\n#define WA\n#define WB\n#define WC\n#define WD\n#define WE\n#define WF\n#define WG\n#define WH\n#define WI\n#define WJ\n#define WK\n#define WL\n#define WM\n#define WN\n#define WO\n#define WP\n#define WQ\n#define WR\n#define WS\n#define WT\n#define WU\n#define WV\n#define WW\n#define WX\n#define WY\n#define WZ\n#define Wa\n#define Wb\n#define Wc\n#define Wd\n#define We\n#define Wf\n#define Wg\n#define Wh\n#define Wi\n#define Wj\n#define Wk\n#define Wl\n#define Wm\n#define Wn\n#define XA\n#define XB\n#define XC\n#define XD\n#define XE\n#define XF\n#define XG\n#define XH\n#define XI\n#define XJ\n#define XK\n#define XL\n#define XM\n#define XN\n#define XO\n#define XP\n#define XQ\n#define XR\n#define XS\n#define XT\n#define XU\n#define XV\n#define XW\n#define XX\n#define XY\n#define XZ\n#define Xa\n#define Xb\n#define Xc\n#define Xd\n#define Xe\n#define Xf\n#define Xg\n#define Xh\n#define Xi\n#define Xj\n#define Xk\n#define Xl\n#define Xm\n#define Xn\n#define YA\n#define YB\n#define YC\n#define YD\n#define YE\n#define YF\n#define YG\n#define YH\n#define YI\n#define YJ\n#define YK\n#define YL\n#define YM\n#define YN\n#define YO\n#define YP\n#define YQ\n#define YR\n#define YS\n#define YT\n#define YU\n#define YV\n#define YW\n#define YX\n#define YY\n#define YZ\n#define Ya\n#define Yb\n#define Yc\n#define Yd\n#define Ye\n#define Yf\n#define Yg\n#define 
Yh\n#define Yi\n#define Yj\n#define Yk\n#define Yl\n#define Ym\n#define Yn\n#define ZA\n#define ZB\n#define ZC\n#define ZD\n#define ZE\n#define ZF\n#define ZG\n#define ZH\n#define ZI\n#define ZJ\n#define ZK\n#define ZL\n#define ZM\n#define ZN\n#define ZO\n#define ZP\n#define ZQ\n#define ZR\n#define ZS\n#define ZT\n#define ZU\n#define ZV\n#define ZW\n#define ZX 1\n"
  },
  {
    "path": "tests/test-c/m_33_big5.c",
    "content": "/* m_33_big5.c: Wide character constant encoded in Big-Five.    */\n\n#include    \"defs.h\"\n#include    <limits.h>\n#define     BYTES_VAL   (1 << CHAR_BIT)\n\nmain( void)\n{\n    char *  ptr;\n\n    fputs( \"started\\n\", stderr);\n\n/* 33.1:    L'ch'.  */\n\n#pragma __setlocale( \"big5\")                /* For MCPP     */\n#pragma setlocale( \"chinese-traditional\")   /* For Visual C */\n\n#if     L'r' == '\\xa6' * BYTES_VAL + '\\x72'\n    ptr = \"Wide character is encoded in Big-Five.\";\n#elif   L'r' == '\\x72' * BYTES_VAL + '\\xa6'\n    ptr = \"Wide character is encoded in Big-Five.\"\n    \"Inverted order of evaluation.\";\n#else\n    ptr = \"I cannot understand Big-Five.\";\n#endif\n\n    assert( strcmp( ptr, \"I cannot understand Big-Five.\") != 0);\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/m_33_eucjp.c",
    "content": "/* m_33_eucjp.c:    Wide character constant encoded in EUC-JP.  */\n\n#include    \"defs.h\"\n#include    <limits.h>\n#define     BYTES_VAL   (1 << CHAR_BIT)\n\nmain( void)\n{\n    char *  ptr;\n\n    fputs( \"started\\n\", stderr);\n\n/* 33.1:    L'ch'.  */\n\n#pragma __setlocale( \"eucjp\")               /* For MCPP     */\n#pragma setlocale( \"eucjp\")                 /* For MCPP on VC   */\n\n#if     L'' == '\\xbb' * BYTES_VAL + '\\xfa'\n    ptr = \"Wide character is encoded in EUC-JP.\";\n#elif   L'' == '\\xfa' * BYTES_VAL + '\\xbb'\n    ptr = \"Wide character is encoded in EUC-JP.\"\n    \"Inverted order of evaluation.\";\n#else\n    ptr = \"I cannot understand EUC-JP.\";\n#endif\n\n    assert( strcmp( ptr, \"I cannot understand EUC-JP.\") != 0);\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/m_33_gb.c",
    "content": "/* m_33_gb.c:   Wide character constant encoded in GB-2312. */\n\n#include    \"defs.h\"\n#include    <limits.h>\n#define     BYTES_VAL   (1 << CHAR_BIT)\n\nmain( void)\n{\n    char *  ptr;\n\n    fputs( \"started\\n\", stderr);\n\n/* 33.1:    L'ch'.  */\n\n#pragma __setlocale( \"gb2312\")              /* For MCPP     */\n#pragma setlocale( \"chinese-simplified\")    /* For Visual C */\n\n#if     L'' == '\\xd7' * BYTES_VAL + '\\xd6'\n    ptr = \"Wide character is encoded in GB 2312.\";\n#elif   L'' == '\\xd6' * BYTES_VAL + '\\xd7'\n    ptr = \"Wide character is encoded in GB 2312.\"\n    \"Inverted order of evaluation.\";\n#else\n    ptr = \"I cannot understand GB-2312.\";\n#endif\n\n    assert( strcmp( ptr, \"I cannot understand GB-2312.\") != 0);\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/m_33_jis.c",
    "content": "/* m_33_jis.c:  Wide character constant encoded in ISO-2022-JP. */\n\n#include    \"defs.h\"\n#include    <limits.h>\n#define     BYTES_VAL   (1 << CHAR_BIT)\n\nmain( void)\n{\n    char *  ptr;\n\n    fputs( \"started\\n\", stderr);\n\n/* 33.1:    L'ch'.  */\n\n#pragma __setlocale( \"jis\")                 /* For MCPP     */\n#pragma setlocale( \"jis\")                   /* For MCPP on VC   */\n\n#if     L'\u001b$B;z\u001b(B' == 0x3b * BYTES_VAL + 0x7a\n    /* This line doesn't work unless \"shift states\" are processed.  */\n    ptr = \"Wide character is encoded in ISO-2022-JP.\";\n#elif   L'\u001b$B;z\u001b(B' == 0x7a * BYTES_VAL + 0x3b\n    /* This line doesn't work unless \"shift states\" are processed.  */\n    ptr = \"Wide character is encoded in ISO-2022-JP.\"\n    \"Inverted order of evaluation.\";\n#else\n    ptr = \"I cannot understand ISO-2022-JP.\";\n#endif\n\n    assert( strcmp( ptr, \"I cannot understand ISO-2022-JP.\") != 0);\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/m_33_ksc.c",
    "content": "/* m_33_ksc.c:  Wide character constant encoded in KSC-5601.    */\n\n#include    \"defs.h\"\n#include    <limits.h>\n#define     BYTES_VAL   (1 << CHAR_BIT)\n\nmain( void)\n{\n    char *  ptr;\n\n    fputs( \"started\\n\", stderr);\n\n/* 33.1:    L'ch'.  */\n\n#pragma __setlocale( \"ksc5601\")             /* For MCPP     */\n#pragma setlocale( \"korean\")                /* For Visual C */\n\n#if     L'' == '\\xed' * BYTES_VAL + '\\xae'\n    ptr = \"Wide character is encoded in KSC-5601.\";\n#elif   L'' == '\\xae' * BYTES_VAL + '\\xed'\n    ptr = \"Wide character is encoded in KSC-5601.\"\n    \"Inverted order of evaluation.\";\n#else\n    ptr = \"I cannot understand KSC-5601.\";\n#endif\n\n    assert( strcmp( ptr, \"I cannot understand KSC-5601.\") != 0);\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/m_33_sjis.c",
    "content": "/* m_33_sjis.c: Wide character constant encoded in shift-JIS.   */\n\n#include    \"defs.h\"\n#include    <limits.h>\n#define     BYTES_VAL   (1 << CHAR_BIT)\n\nmain( void)\n{\n    char *  ptr;\n\n    fputs( \"started\\n\", stderr);\n\n/* 33.1:    L'ch'.  */\n\n#pragma __setlocale( \"sjis\")                /* For MCPP     */\n#pragma setlocale( \"japanese\")              /* For Visual C */\n\n#if     L'' == '\\x8e' * BYTES_VAL + '\\x9a'\n    ptr = \"Wide character is encoded in shift-JIS.\";\n#elif   L'' == '\\x9a' * BYTES_VAL + '\\x8e'\n    ptr = \"Wide character is encoded in shift-JIS.\"\n    \"Inverted order of evaluation.\";\n#else\n    ptr = \"I cannot understand shift-JIS.\";\n#endif\n\n    assert( strcmp( ptr, \"I cannot understand shift-JIS.\") != 0);\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/m_33_utf8.c",
    "content": "/* m_33_utf8.c: Wide character constant encoded in UTF-8.   */\n\n#include    \"defs.h\"\n#include    <limits.h>\n#define     BYTES_VAL   (1 << CHAR_BIT)\n\nmain( void)\n{\n    char *  ptr;\n\n    fputs( \"started\\n\", stderr);\n\n/* 33.1:    L'ch'.  */\n\n#pragma __setlocale( \"utf8\")                /* For MCPP     */\n#pragma setlocale( \"utf8\")                  /* For MCPP on VC   */\n\n#if     L'字' == '\\xe5' * BYTES_VAL * BYTES_VAL + '\\xad' * BYTES_VAL + '\\x97'\n    ptr = \"Wide character is encoded in UTF-8.\";\n#elif   L'字' == '\\x97' * BYTES_VAL * BYTES_VAL + '\\xad' * BYTES_VAL + '\\xe5'\n    ptr = \"Wide character is encoded in UTF-8.\"\n    \"Inverted order of evaluation.\";\n#else\n    ptr = \"I cannot understand UTF-8.\";\n#endif\n\n    assert( strcmp( ptr, \"I cannot understand UTF-8.\") != 0);\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/m_34_big5.c",
    "content": "/* m_34_big5.t: Multi-byte character constant encoded in Big-Five.  */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    char *  ptr;\n\n    fputs( \"started\\n\", stderr);\n\n/* 34.1:    */\n\n#pragma __setlocale( \"big5\")                /* For MCPP     */\n#pragma setlocale( \"chinese-traditional\")   /* For Visual C */\n\n#if     'r' == '\\xa6\\x72'\n    ptr = \"Multi-byte character is encoded in Big-Five.\";\n#else\n    ptr = \"I cannot understand Big-Five.\";\n#endif\n\n    assert( strcmp( ptr, \"I cannot understand Big-Five.\") != 0);\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/m_34_eucjp.c",
    "content": "/* m_34_eucjp.c:    Multi-byte character constant encoded in EUC-JP.    */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    char *  ptr;\n\n    fputs( \"started\\n\", stderr);\n\n/* 34.1:    */\n\n#pragma __setlocale( \"eucjp\")               /* For MCPP     */\n#pragma setlocale( \"eucjp\")                 /* For MCPP on VC   */\n\n#if     '' == '\\xbb\\xfa'\n    ptr = \"Multi-byte character is encoded in EUC-JP.\";\n#else\n    ptr = \"I cannot understand EUC-JP.\";\n#endif\n\n    assert( strcmp( ptr, \"I cannot understand EUC-JP.\") != 0);\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/m_34_gb.c",
    "content": "/* m_34_gb.c:   Multi-byte character constant encoded in GB-2312.   */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    char *  ptr;\n\n    fputs( \"started\\n\", stderr);\n\n/* 34.1:    */\n\n#pragma __setlocale( \"gb2312\")              /* For MCPP     */\n#pragma setlocale( \"chinese-simplified\")    /* For Visual C */\n\n#if     '' == '\\xd7\\xd6'\n    ptr = \"Multi-byte character is encoded in GB-2312.\";\n#else\n    ptr = \"I cannot understand GB-2312.\";\n#endif\n\n    assert( strcmp( ptr, \"I cannot understand GB-2312.\") != 0);\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/m_34_jis.c",
    "content": "/* m_34_jis.c:  Multi-byte character constant encoded in ISO-2022-JP.   */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    char *  ptr;\n\n    fputs( \"started\\n\", stderr);\n\n/* 34.1:    */\n\n#pragma __setlocale( \"jis\")                 /* For MCPP     */\n#pragma setlocale( \"jis\")                   /* For MCPP on VC   */\n\n#if     '\u001b$B;z\u001b(B' == '\\x3b\\x7a'\n    /* This line doesn't work unless \"shift states\" are processed.  */\n    ptr = \"Multi-byte character is encoded in ISO-2022-JP.\";\n#else\n    ptr = \"I cannot understand ISO-2022-JP.\";\n#endif\n\n    assert( strcmp( ptr, \"I cannot understand ISO-2022-JP.\") != 0);\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/m_34_ksc.c",
    "content": "/* m_34_ksc.c:  Multi-byte character constant encoded in KSC-5601.  */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    char *  ptr;\n\n    fputs( \"started\\n\", stderr);\n\n/* 34.1:    */\n\n#pragma __setlocale( \"ksc5601\")             /* For MCPP     */\n#pragma setlocale( \"korean\")                /* For Visual C */\n\n#if     '' == '\\xed\\xae'\n    ptr = \"Multi-byte character is encoded in KSC-5601.\";\n#else\n    ptr = \"I cannot understand KSC-5601.\";\n#endif\n\n    assert( strcmp( ptr, \"I cannot understand KSC-5601.\") != 0);\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/m_34_sjis.c",
    "content": "/* m_34_sjis.c: Multi-byte character constant encoded in shift-JIS. */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    char *  ptr;\n\n    fputs( \"started\\n\", stderr);\n\n/* 34.1:    */\n\n#pragma __setlocale( \"sjis\")                /* For MCPP     */\n#pragma setlocale( \"japanese\")              /* For Visual C */\n\n#if     '' == '\\x8e\\x9a'\n    ptr = \"Multi-byte character is encoded in shift-JIS.\";\n#else\n    ptr = \"I cannot understand shift-JIS.\";\n#endif\n\n    assert( strcmp( ptr, \"I cannot understand shift-JIS.\") != 0);\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/m_34_utf8.c",
    "content": "/* m_34_utf8.c: Multi-byte character constant encoded in UTF-8. */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    char *  ptr;\n\n    fputs( \"started\\n\", stderr);\n\n/* 34.1:    */\n\n#pragma __setlocale( \"utf8\")                /* For MCPP     */\n#pragma setlocale( \"utf8\")                  /* For MCPP on VC  */\n\n#if     '字' == '\\xe5\\xad\\x97'\n    ptr = \"Multi-byte character is encoded in UTF-8.\";\n#else\n    ptr = \"I cannot understand UTF-8.\";\n#endif\n\n    assert( strcmp( ptr, \"I cannot understand UTF-8.\") != 0);\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/m_36_big5.c",
    "content": "/* m_36_big5.c: Handling of '\\\\' in BigFive multi-byte character.    */\n\n#include    \"defs.h\"\n\n#define     str( a)     # a\n\nmain( void)\n{\n    fputs( \"started\\n\", stdout);\n\n/* 36.1:    0x5c in multi-byte character is not an escape character */\n\n#pragma __setlocale( \"big5\")                /* For MCPP     */\n#pragma setlocale( \"chinese-traditional\")   /* For Visual C */\n\n#if     'r' == '\\xa6\\x72' && '\\' != '\\xa5\\x5c'\n    fputs( \"Bad handling of '\\\\' in multi-byte character\", stdout);\n    exit( 1);\n#endif\n\n/* 36.2:    # operator should not insert '\\\\' before 0x5c in multi-byte\n        character   */\n    assert( strcmp( str( \"\\Z\"), \"\\\"\\Z\\\"\") == 0);\n    fputs( \"\\Z\" \"\\\"\\Z\\\"\\n\", stdout);\n\n    fputs( \"success\\n\", stdout);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/m_36_jis.c",
    "content": "/* m_36_jis.c:  Handling of '\\\\' in ISO-2022-JP multi-byte character.  */\n\n#include    \"defs.h\"\n\n#define     str( a)     # a\n\nmain( void)\n{\n    fputs( \"started\\n\", stdout);\n\n/* 36.1:    0x5c in multi-byte character is not an escape character */\n\n#pragma __setlocale( \"jis\")                 /* For MCPP     */\n#pragma setlocale( \"jis\")                   /* For MCPP on VC   */\n\n#if     '\u001b$B;z\u001b(B' == '\\x3b\\x7a' && '\u001b$B0\\\\\u001b(B' != '\\x30\\x5c'\n    fputs( \"Bad handling of '\\\\' in multi-byte character\", stdout);\n    exit( 1);\n#endif\n\n/* 36.2:    # operator should not insert '\\\\' before 0x5c, 0x22 or 0x27\n        in multi-byte character */\n    assert( strcmp( str( \"\u001b$B0\\F0\u001b(B\"), \"\\\"\u001b$B0\\F0\u001b(B\\\"\") == 0);\n    assert( strcmp( str( \"\u001b$B1\"M[\u001b(B\"), \"\\\"\u001b$B1\"M[\u001b(B\\\"\") == 0);\n    assert( strcmp( str( \"\u001b$B1'Ch\u001b(B\"), \"\\\"\u001b$B1'Ch\u001b(B\\\"\") == 0);\n\n    fputs( \"\u001b$B0\\F0\u001b(B\" \"\\\"\u001b$B0\\F0\u001b(B\\\"\\n\", stdout);\n    fputs( \"\u001b$B1\"M[\u001b(B\" \"\\\"\u001b$B1\"M[\u001b(B\\\"\\n\", stdout);\n    fputs( \"\u001b$B1'Ch\u001b(B\" \"\\\"\u001b$B1'Ch\u001b(B\\\"\\n\", stdout);\n    fputs( \"success\\n\", stdout);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/m_36_sjis.c",
    "content": "/* m_36_sjis.c: Handling of '\\\\' in shift-JIS multi-byte character. */\n\n#include    \"defs.h\"\n\n#define     str( a)     # a\n\nmain( void)\n{\n    fputs( \"started\\n\", stdout);\n\n/* 36.1:    0x5c in multi-byte character is not an escape character */\n\n#pragma __setlocale( \"sjis\")                /* For MCPP     */\n#pragma setlocale( \"japanese\")              /* For Visual C */\n\n#if     '' == '\\x8e\\x9a' && '\\' != '\\x95\\x5c'\n    fputs( \"Bad handling of '\\\\' in multi-byte character\", stdout);\n    exit( 1);\n#endif\n\n/* 36.2:    # operator should not insert '\\\\' before 0x5c in multi-byte\n        character   */\n    assert( strcmp( str( \"\\\"), \"\\\"\\\\\"\") == 0);\n    fputs( \"\\\" \"\\\"\\\\\"\\n\", stdout);\n\n    fputs( \"success\\n\", stdout);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_1.c",
    "content": "/* n_1.c:   Conversion of trigraph sequences.   */\n\n#include    \"defs.h\"\n\nchar    quasi_trigraph[] = { '?', '?', ' ', '?', '?', '?', ' ', '?', '?', '%',\n            ' ', '?', '?', '^', ' ', '?', '#', '\\0' };\n\nmain( void)\n{\n    int     ab = 1, cd = 2;\n\n    fputs( \"started\\n\", stderr);\n\n/* 1.1: The following 9 sequences are valid trigraph sequences. */\n    assert( strcmp( \"??( ??) ??/??/ ??' ??< ??> ??! ??- ??=\"\n            ,\"[ ] \\\\ ^ { } | ~ #\") == 0);\n\n/* 1.2: In directive line.  */\n??= define  OR( a, b)   a ??! b\n    assert( OR( ab, cd) == 3);\n\n/* 1.3: Any sequence other than above 9 is not a trigraph sequence. */\n    assert( strcmp( \"?? ??? ??% ??^ ???=\", quasi_trigraph) == 0);\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_10.c",
    "content": "/* n_10.c:  #if, #elif, #else and #endif pp-directive.  */\n\n#include    \"defs.h\"\n\n#define MACRO_0     0\n#define MACRO_1     1\n\nmain( void)\n{\n    fputs( \"started\\n\", stderr);\n\n/* 10.1:    */\n/* Note: an undefined identifier in #if expression is replaced to 0.    */\n#if     a\n    assert( a);\n#elif   MACRO_0\n    assert( MACRO_0);\n#elif   MACRO_1         /* Valid block  */\n    assert( MACRO_1);\n#else\n    assert( 0);\n#endif\n\n/* 10.2: Comments must be processed even if in skipped #if block.   */\n/* At least tokenization of string literal and character constant is necessary\n        to process comments, e.g. /* is not a comment mark in string literal.\n */\n#ifdef  UNDEFINED\n    /* Comment  */\n    \"in literal /* is not a comment\"\n#endif\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_11.c",
    "content": "/* n_11.c:  Operator \"defined\" in #if or #elif directive.   */\n\n#include    \"defs.h\"\n\n#define MACRO_abc   abc\n#define MACRO_0     0\n#define ZERO_TOKEN\n\nmain( void)\n{\n    int     abc = 1, a = 0;\n\n    fputs( \"started\\n\", stderr);\n\n/* 11.1:    */\n#if     defined a\n    assert( a);\n#else\n    assert( MACRO_abc);\n#endif\n#if     defined (MACRO_abc)\n    assert( MACRO_abc);\n#else\n    assert( a);\n#endif\n\n/* 11.2:    \"defined\" is an unary operator whose result is 1 or 0.  */\n#if     defined MACRO_0 * 3 != 3\n    fputs( \"Bad handling of defined operator.\\n\", stderr);\n    exit( 1);\n#endif\n#if     (!defined ZERO_TOKEN != 0) || (-defined ZERO_TOKEN != -1)\n    fputs( \"Bad grouping of defined, -, ! in #if expression.\\n\", stderr);\n    exit( 1);\n#endif\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_12.c",
    "content": "/* n_12.c:  Integer preprocessing number token and type of #if expression.  */\n\n#include    \"defs.h\"\n#include    <limits.h>\n\nmain( void)\n{\n    fputs( \"started\\n\", stderr);\n\n/* 12.1:    */\n#if     LONG_MAX <= LONG_MIN\n    fputs( \"Bad evaluation of long.\\n\", stderr);\n    exit( 1);\n#endif\n#if     LONG_MAX <= 1073741823  /* 0x3FFFFFFF   */\n    fputs( \"Bad evaluation of long.\\n\", stderr);\n    exit( 1);\n#endif\n\n/* 12.2:    */\n#if     ULONG_MAX / 2 < LONG_MAX\n    fputs( \"Bad evaluation of unsigned long.\\n\", stderr);\n    exit( 1);\n#endif\n\n/* 12.3:    Octal number.   */\n#if     0177777 != 65535\n    fputs( \"Bad evaluation of octal number.\\n\", stderr);\n    exit( 1);\n#endif\n\n/* 12.4:    Hexadecimal number. */\n#if     0Xffff != 65535 || 0xFfFf != 65535\n    fputs( \"Bad evaluation of hexadecimal number.\\n\", stderr);\n    exit( 1);\n#endif\n\n/* 12.5:    Suffix 'L' or 'l'.  */\n#if     0L != 0 || 0l != 0\n    fputs( \"Bad evaluation of 'L' suffix.\\n\", stderr);\n    exit( 1);\n#endif\n\n/* 12.6:    Suffix 'U' or 'u'.  */\n#if     1U != 1 || 1u != 1\n    fputs( \"Bad evaluation of 'U' suffix.\\n\", stderr);\n    exit( 1);\n#endif\n\n/* 12.7:    Negative integer.   */\n#if     0 <= -1\n    fputs( \"Bad evaluation of negative number.\\n\", stderr);\n    exit( 1);\n#endif\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_13.c",
    "content": "/* n_13.c:  Valid operators in #if expression.  */\n\n/* Valid operators are (precedence in this order) :\n    defined, (unary)+, (unary)-, ~, !,\n    *, /, %,\n    +, -,\n    <<, >>,\n    <, >, <=, >=,\n    ==, !=,\n    &,\n    ^,\n    |,\n    &&,\n    ||,\n    ? :\n */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    fputs( \"started\\n\", stderr);\n\n/* 13.1:    Bit shift.  */\n#if     1 << 2 != 4 || 8 >> 1 != 4\n    fputs( \"Bad arithmetic of <<, >> operators.\\n\", stderr);\n    exit( 1);\n#endif\n\n/* 13.2:    Bitwise operators.  */\n#if     (3 ^ 5) != 6 || (3 | 5) != 7 || (3 & 5) != 1\n    fputs( \"Bad arithmetic of ^, |, & operators.\\n\", stderr);\n    exit( 1);\n#endif\n\n/* 13.3:    Result of ||, && operators is either of 1 or 0. */\n#if     (2 || 3) != 1 || (2 && 3) != 1 || (0 || 4) != 1 || (0 && 5) != 0\n    fputs( \"Bad arithmetic of ||, && operators.\\n\", stderr);\n    exit( 1);\n#endif\n\n/* 13.4:    ?, : operator.  */\n#if     (0 ? 1 : 2) != 2\n    fputs( \"Bad arithmetic of ?: operator.\\n\", stderr);\n    exit( 1);\n#endif\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_13_13.c",
    "content": "/* n_13_13.c:   #if expression with macros. */\n\n#include    \"defs.h\"\n#define ZERO_TOKEN\n#define MACRO_0         0\n#define MACRO_1         1\n#define and             &&\n#define or              ||\n#define not_eq          !=\n#define bitor           |\n\nmain( void)\n{\n    fputs( \"started\\n\", stderr);\n\n/* 13.13:   With macros expanding to operators. */\n#if     (1 bitor 2) == 3 and 4 not_eq 5 or 0\n    /* #if (1 | 2) == 3 && 4 != 5 || 0  */\n#else\n    fputs(\n    \"Bad evaluation of macros expanding to operators in #if expression.\\n\"\n            , stderr);\n    exit( 1);\n#endif\n\n/* 13.14:   With macros expanding to 0 token, nonsence but legal.   */\n#if     ZERO_TOKEN MACRO_1 ZERO_TOKEN > ZERO_TOKEN MACRO_0 ZERO_TOKEN\n    /* #if 1 > 0    */\n#else\n    fputs(\n    \"Bad evaluation of macros expanding to 0 token in #if expression.\\n\"\n            , stderr);\n    exit( 1);\n#endif\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_13_5.c",
    "content": "/* n_13_5.c:    Arithmetic conversion in #if expressions.   */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    fputs( \"started\\n\", stderr);\n\n/* 13.5:    The usual arithmetic conversion is not performed on bit shift.  */\n#if     -1 << 3U > 0\n    fputs( \"Bad conversion of bit shift operands.\\n\", stderr);\n    exit( 1);\n#endif\n\n/* 13.6:    Usual arithmetic conversions.   */\n#if     -1 <= 0U        /* -1 is converted to unsigned long.    */\n    fputs( \"Bad arithmetic conversion.\\n\", stderr);\n    exit( 1);\n#endif\n\n#if     -1 * 1U <= 0\n    fputs( \"Bad arithmetic conversion.\\n\", stderr);\n    exit( 1);\n#endif\n\n/* Second and third operands of conditional operator are converted to the\n        same type, thus -1 is converted to unsigned long.    */\n#if     (1 ? -1 : 0U) <= 0\n    fputs( \"Bad arithmetic conversion.\\n\", stderr);\n    exit( 1);\n#endif\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_13_7.c",
    "content": "/* n_13_7.c:    Short-circuit evaluation of #if expression. */\n\n#include    \"defs.h\"\n#define MACRO_0     0\n\nmain( void)\n{\n    fputs( \"started\\n\", stderr);\n\n/* 13.7:    10/0 or 10/MACRO_0 are never evaluated, \"divide by zero\" error\n        cannot occur.   */\n#if     0 && 10 / 0\n    exit( 1);\n#endif\n#if     not_defined && 10 / not_defined\n    exit( 1);\n#endif\n#if     MACRO_0 && 10 / MACRO_0 > 1\n    exit( 1);\n#endif\n#if     MACRO_0 ? 10 / MACRO_0 : 0\n    exit( 1);\n#endif\n#if     MACRO_0 == 0 || 10 / MACRO_0 > 1        /* Valid block  */\n    fputs( \"success\\n\", stderr);\n    return  0;\n#else\n    exit( 1);\n#endif\n}\n\n"
  },
  {
    "path": "tests/test-c/n_13_8.c",
    "content": "/* n_13_8.c:    Grouping of sub-expressions in #if expression.  */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    fputs( \"started\\n\", stderr);\n\n/* 13.8:    Unary operators are grouped from right to left. */\n#if (- -1 != 1) || (!!9 != 1) || (-!+!9 != -1) || (~~1 != 1)\n    fputs( \"Bad grouping of -, +, !, ~ in #if expression.\\n\", stderr);\n    exit( 1);\n#endif\n\n/* 13.9:    ?: operators are grouped from right to left.    */\n#if (1 ? 2 ? 3 ? 3 : 2 : 1 : 0) != 3\n    fputs( \"Bad grouping of ? : in #if expression.\\n\", stderr);\n    exit( 1);\n#endif\n\n/* 13.10:   Other operators are grouped from left to right. */\n#if (15 >> 2 >> 1 != 1) || (3 << 2 << 1 != 24)\n    fputs( \"Bad grouping of >>, << in #if expression.\\n\", stderr);\n    exit( 1);\n#endif\n\n/* 13.11:   Test of precedence. */\n#if 3*10/2 >> !0*2 >> !+!-9 != 1\n    fputs( \"Bad grouping of -, +, !, *, /, >> in #if expression.\\n\", stderr);\n    exit( 1);\n#endif\n\n/* 13.12:   Overall test.  Grouped as:\n        ((((((+1 - -1 - ~~1 - -!0) & 6) | ((8 % 9) ^ (-2 * -2))) >> 1) == 7)\n        ? 7 : 0) != 7\n    evaluated to FALSE.\n */\n#if (((+1- -1-~~1- -!0&6|8%9^-2*-2)>>1)==7?7:0)!=7\n    fputs( \"Bad arithmetic of #if expression.\\n\", stderr);\n    exit( 1);\n#endif\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_15.c",
    "content": "/* n_15.c:  #ifdef, #ifndef directives. */\n\n#include    \"defs.h\"\n\n#define MACRO_0     0\n#define MACRO_1     1\n\nmain( void)\n{\n    fputs( \"started\\n\", stderr);\n\n/* 15.1:    #ifdef directive.   */\n#ifdef  MACRO_1     /* Valid block  */\n    assert( MACRO_1);\n#else\n    assert( MACRO_0);\n#endif\n\n/* 15.2:    #ifndef directive.  */\n#ifndef MACRO_1\n    assert( MACRO_0);\n#else               /* Valid block  */\n    assert( MACRO_1);\n#endif\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_18.c",
    "content": "/* n_18.c:  #define directive.  */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    int     c = 3;\n\n/* Excerpts from ISO C 6.8.3 \"Examples\".    */\n#define OBJ_LIKE        (1-1)\n#define FTN_LIKE(a)     ( a )\n\n    fputs( \"started\\n\", stderr);\n\n/* 18.1:    Definition of an object-like macro. */\n    assert( OBJ_LIKE == 0);\n#define ZERO_TOKEN\n#ifndef ZERO_TOKEN\n    fputs( \"Can't define macro to 0-token.\\n\", stderr);\n    exit( 1);\n#endif\n\n/* 18.2:    Definition of a function-like macro.    */\n    assert( FTN_LIKE( c) == 3);\n\n/* 18.3:    Spelling in string identical to parameter is not a parameter.   */\n#define STR( n1, n2)    \"n1:n2\"\n    assert( strcmp( STR( 1, 2), \"n1:n2\") == 0);\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_19.c",
    "content": "/* n_19.c:  Valid re-definitions of macros. */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    int     c = 1;\n\n    fputs( \"started\\n\", stderr);\n\n/* Excerpts from ISO C 6.8.3 \"Examples\".    */\n#define OBJ_LIKE        (1-1)\n#define FTN_LIKE(a)     ( a )\n\n/* 19.1:    */\n#define OBJ_LIKE    /* white space */  (1-1) /* other */\n\n/* 19.2:    */\n#define FTN_LIKE( a     )(  /* note the white space */  \\\n                        a  /* other stuff on this line\n                           */ )\n    assert( FTN_LIKE( c) == 1);\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_2.c",
    "content": "/* n_2.c:   Line splicing by <backslash><newline> sequence. */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    int     ab = 1, cd = 2, ef = 3, abcde = 5;\n\n    fputs( \"started\\n\", stderr);\n\n/* 2.1: In a #define directive line, between the parameter list and the\n        replacement text.   */\n#define FUNC( a, b, c)  \\\n        a + b + c\n    assert( FUNC( ab, cd, ef) == 6);\n\n/* 2.2: In a #define directive line, among the parameter list and among the\n        replacement text.   */\n#undef  FUNC\n#define FUNC( a, b  \\\n    , c)            \\\n    a + b           \\\n    + c\n    assert (FUNC( ab, cd, ef) == 6);\n\n/* 2.3: In a string literal.    */\n    assert (strcmp( \"abc\\\nde\", \"abcde\") == 0);\n\n/* 2.4: <backslash><newline> in midst of an identifier. */\n    assert( abc\\\nde == 5);\n\n/* 2.5: <backslash><newline> by trigraph.   */\n    assert( abc??/\nde == 5);\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_20.c",
    "content": "/* n_20.c:  Macro lexically identical to keyword. */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n\n/* 20.1:    */\n#define float   double\n    float   fl;\n\n    fputs( \"started\\n\", stderr);\n\n    assert( sizeof fl == sizeof (double));\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_21.c",
    "content": "/* n_21.c:  Tokenization (No preprocessing tokens are merged implicitly).   */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    int     a = 1, x = 2, y = -3;\n\n    fputs( \"started\\n\", stderr);\n\n/* 21.1:    */\n#define MINUS   -\n    assert( -MINUS-a == -1);\n\n/* 21.2:    */\n#define sub( a, b)  a-b     /* '(a)-(b)' is better  */\n#define Y   -y              /* '(-y)' is better     */\n/*  x- -y   */\n    assert( sub( x, Y) == -1);\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_22.c",
    "content": "/* n_22.c:  Tokenization of preprocessing number.   */\n\n#include    \"defs.h\"\n\n#define str( a)     # a\n#define xstr( a)    str( a)\n#define EXP         1\n\nmain( void)\n{\n    fputs( \"started\\n\", stderr);\n\n/* 22.1:    12E+EXP is a preprocessing number, EXP is not expanded. */\n    assert( strcmp( xstr( 12E+EXP), \"12E+EXP\") == 0);\n\n/* 22.2:    .2e-EXP is also a preprocessing number. */\n    assert( strcmp( xstr( .2e-EXP), \".2e-EXP\") == 0);\n\n/* 22.3:    + or - is allowed only following E or e, 12+EXP is not a\n        preprocessing number.   */\n    assert( strcmp( xstr( 12+EXP), \"12+1\") == 0);\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_23.c",
    "content": "/* n_23.c:  ## operator in macro definition.    */\n\n#include    \"defs.h\"\n\n#define glue( a, b)     a ## b\n#define xglue( a, b)    glue( a, b)\n#define MACRO_0     0\n#define MACRO_1     1\n\nmain( void)\n{\n    int     xy = 1;\n\n    fputs( \"started\\n\", stderr);\n\n/* 23.1:    */\n    assert( glue( x, y) == 1);\n\n/* 23.2:    Generate a preprocessing number.    */\n#define EXP     2\n    assert( xglue( .12e+, EXP) == 12.0);\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_24.c",
    "content": "/* n_24.c:  # operator in macro definition. */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    fputs( \"started\\n\", stderr);\n\n/* 24.1:    */\n#define str( a)     # a\n    assert( strcmp( str( a+b), \"a+b\") == 0);\n\n/* 24.2:    White spaces between tokens of operand are converted to one space.\n */\n    assert( strcmp( str(    ab  /* comment */   +\n        cd  ), \"ab + cd\") == 0);\n\n/* 24.3:    \\ is inserted before \\ and \" in or surrounding literals and no\n        other character is inserted to anywhere.    */\n    assert( strcmp( str( '\"' + \"' \\\"\"), \"'\\\"' + \\\"' \\\\\\\"\\\"\") == 0);\n\n/* 24.4:    Line splicing by <backslash><newline> is done prior to token\n        parsing.   */\n    assert( strcmp( str( \"ab\\\nc\"), \"\\\"abc\\\"\") == 0);\n\n/* 24.5:    Token separator inserted by macro expansion should be removed.\n        (Meanwhile, tokens should not be merged.  See 21.2.)    */\n#define xstr( a)    str( a)\n#define f(a)        a\n    assert( strcmp( xstr( x-f(y)), \"x-y\") == 0);\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_25.c",
    "content": "/* n_25.c:  Macro arguments are pre-expanded (unless the argument is an\n        operand of # or ## operator) separately, that is, are macro-replaced\n        completely prior to rescanning. */\n\n#include    \"defs.h\"\n\n#define ZERO_TOKEN\n#define MACRO_0         0\n#define MACRO_1         1\n#define TWO_ARGS        a,b\n#define sub( x, y)      (x - y)\n#define glue( a, b)     a ## b\n#define xglue( a, b)    glue( a, b)\n#define str( a)         # a\n\nmain( void)\n{\n    int     a = 1, b = 2, abc = 3, MACRO_0MACRO_1 = 2;\n\n    fputs( \"started\\n\", stderr);\n\n/* 25.1:    \"TWO_ARGS\" is read as one argument to \"sub\", then expanded to\n        \"a,b\", then \"x\" is substituted by \"a,b\".    */\n    assert( sub( TWO_ARGS, 1) == 1);\n\n/* 25.2:    An argument pre-expanded to 0-token.    */\n/*  ( - 1); */\n    assert( sub( ZERO_TOKEN, a) == -1);\n\n/* 25.3:    \"glue( a, b)\" is pre-expanded.  */\n    assert( xglue( glue( a, b), c) == 3);\n\n/* 25.4:    Operands of ## operator are not pre-expanded.   */\n    assert( glue( MACRO_0, MACRO_1) == 2);\n\n/* 25.5:    Operand of # operator is not pre-expanded.  */\n    assert( strcmp( str( ZERO_TOKEN), \"ZERO_TOKEN\") == 0);\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_26.c",
    "content": "/* n_26.c:  The name once replaced is not furthur replaced. */\n\n#include    \"defs.h\"\n\nint     f( a)\n    int     a;\n{\n    return  a;\n}\n\nint     g( a)\n    int     a;\n{\n    return  a * 2;\n}\n\nmain( void)\n{\n    int     x = 1;\n    int     AB = 1;\n    int     Z[1];\n\n    fputs( \"started\\n\", stderr);\n\n    Z[0] = 1;\n\n/* 26.1:    Directly recursive object-like macro definition.    */\n/*  Z[0];   */\n#define Z   Z[0]\n    assert( Z == 1);\n\n/* 26.2:    Intermediately recursive object-like macro definition.  */\n/*  AB; */\n#define AB  BA\n#define BA  AB\n    assert( AB == 1);\n\n/* 26.3:    Directly recursive function-like macro definition.  */\n/*  x + f(x);   */\n#define f(a)    a + f(a)\n    assert( f( x) == 2);\n\n/* 26.4:    Intermediately recursive function-like macro definition.    */\n/*  x + x + g( x);  */\n#define g(a)    a + h( a)\n#define h(a)    a + g( a)\n    assert( g( x) == 4);\n\n/* 26.5:    Rescanning encounters the non-replaced macro name.  */\n/*  Z[0] + f( Z[0]);    */\n    assert( f( Z) == 2);\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_27.c",
    "content": "/* n_27.c:  Rescanning of a macro replace any macro call in the replacement\n        text after substitution of parameters by pre-expanded-arguments.  This\n        re-examination may involve the succeding sequences from the source\n        file (what a queer thing!). */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    int     a = 1, b = 2, c, m = 1, n = 2;\n\n    fputs( \"started\\n\", stderr);\n\n/* 27.1:    Cascaded use of object-like macros. */\n/*  1 + 2 + 3 + 4 + 5 + 6 + 7 + 8;  */\n#define NEST8   NEST7 + 8\n#define NEST7   NEST6 + 7\n#define NEST6   NEST5 + 6\n#define NEST5   NEST4 + 5\n#define NEST4   NEST3 + 4\n#define NEST3   NEST2 + 3\n#define NEST2   NEST1 + 2\n#define NEST1   1\n    assert( NEST8 == 36);\n\n/* 27.2:    Cascaded use of function-like macros.   */\n/*  (1) + (1 + 2) + 1 + 2 + 1 + 2 + 3 + 1 + 2 + 3 + 4;  */\n#define FUNC4( a, b)    FUNC3( a, b) + NEST4\n#define FUNC3( a, b)    FUNC2( a, b) + NEST3\n#define FUNC2( a, b)    FUNC1( a, b) + NEST2\n#define FUNC1( a, b)    (a) + (b)\n    assert( FUNC4( NEST1, NEST2) == 23);\n\n/* 27.3:    An identifier generated by ## operator is subject to expansion. */\n#define glue( a, b)     a ## b\n#define MACRO_1         1\n    assert( glue( MACRO_, 1) == 1);\n\n#define sub( x, y)      (x - y)\n#define head            sub(\n#define math( op, a, b) op( (a), (b))\n\n/* 27.4:    'sub' as an argument of math() is not pre-expanded, since '(' is\n        missing.    */\n    assert( math( sub, a, b) == -1);\n\n/* 27.5:    Queer thing.    */\n    c = head a,b );\n    assert( c == -1);\n\n/* 27.6:    Recursive macro (the 2nd 'm' is expanded to 'n' since it is in\n        source file).   */\n#define m       n\n#define n( a)   a \n    c = m( m);\n    assert( c == 2);\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_28.c",
    "content": "/* n_28.c:  __FILE__, __LINE__, __DATE__, __TIME__, __STDC__ and\n            __STDC_VERSION__ are predefined.    */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    char *  date = __DATE__;\n    char *  fname = __FILE__;\n\n    fputs( \"started\\n\", stderr);\n\n/* 28.1:    */\n/* Remove directory part (if any).  */\n    fname += strlen( fname) - 6;\n    assert( strcmp( fname, \"n_28.c\") == 0);\n\n/* 28.2:    */\n    assert( __LINE__ == 19);\n\n/* 28.3:    */\n    assert( strlen( __DATE__) == 11);\n    assert( date[ 4] != '0');\n\n/* 28.4:    */\n    assert( strlen( __TIME__) == 8);\n\n/* 28.5:    */\n    assert( __STDC__);\n\n/* 28.6:    */\n    assert( __STDC_VERSION__);\n\n/* 28.7:    */\n#include    \"line.h\"\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_29.c",
    "content": "/* n_29.c:  #undef directive.   */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    int     DEFINED = 1;\n\n    fputs( \"started\\n\", stderr);\n\n/* 29.1:    Undefined macro is not a macro. */\n#define DEFINED\n#undef  DEFINED\n    assert( DEFINED == 1);\n\n/* 29.2:    Undefining undefined name is not an error.  */\n#undef  UNDEFINED\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_3.c",
    "content": "/* n_3.c:   Handling of comment.    */\n\n#include    \"defs.h\"\n\n#define str( a)     # a\n\nmain( void)\n{\n    int     abcd = 4;\n\n    fputs( \"started\\n\", stderr);\n\n/* 3.1: A comment is converted to one space.    */\n    assert( strcmp( str( abc/* comment */de), \"abc de\") == 0);\n\n/* 3.2: // is not a comment of C.   */\n/*  assert( strcmp( str( //), \"//\") == 0);  */\n\n/* 3.3: Comment is parsed prior to the parsing of preprocessing directive.  */\n#if     0\n    \"nonsence\"; /*\n#else\n    still in\n    comment     */\n#else\n#define MACRO_abcd  /*\n    in comment\n    */  abcd\n#endif\n    assert( MACRO_abcd == 4);\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_30.c",
    "content": "/* n_30.c:  Macro calls.    */\n/*  Note:   Comma separate the arguments of function-like macro call,\n        but comma between matching inner parenthesis doesn't.  This feature\n        is tested on so many places in this suite especially on *.c samples\n        which use assert() macro, that no separete item to test this feature\n        is provided.    */\n\n#include    \"defs.h\"\n\n#define FUNC( a, b, c)      a + b + c\n\nmain( void)\n{\n    int     a = 1, b = 2, c = 3;\n\n    fputs( \"started\\n\", stderr);\n\n/* 30.1:    A macro call crossing lines.    */\n    assert\n    (\n        FUNC\n        (\n            a,\n            b,\n            c\n        )\n        == 6\n    );\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_32.c",
    "content": "/* n_32.c:  Escape sequence in character constant in #if expression.    */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    fputs( \"started\\n\", stderr);\n\n/* 32.1:    Character octal escape sequence.    */\n#if     '\\123' != 83\n    fputs( \"Bad evaluation of octal escape sequence.\\n\", stderr);\n    exit( 1);\n#endif\n\n/* 32.2:    Character hexadecimal escape sequence.  */\n#if     '\\x1b' != '\\033'\n    fputs( \"Bad evaluation of hexadecimal escape sequence.\\n\", stderr);\n    exit( 1);\n#endif\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_37.c",
    "content": "/* n_37.c:  Translation limits. */\n\n#include    \"defs.h\"\n\n/* 37.1:    Number of parameters in macro: at least 31. */\n#define glue31(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C,D,E)   \\\n    a##b##c##d##e##f##g##h##i##j##k##l##m##n##o##p##q##r##s##t##u##v##w##x##y##z##A##B##C##D##E\n\nmain( void)\n{\n    int     ABCDEFGHIJKLMNOPQRSTUVWXYZabcde = 31;\n    int     ABCDEFGHIJKLMNOPQRSTUVWXYZabcd_ = 30;\n    int     nest = 0;\n\n    fputs( \"started\\n\", stderr);\n\n/* 37.2:    Number of arguments in macro call: at least 31. */\n    assert(\n        glue31( A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R\n            , S, T, U, V, W, X, Y, Z, a, b, c, d, e)\n        == 31);\n\n/* 37.3:    Significant initial characters in an internal identifier or a\n        macro name: at least 31.  */\n    assert( ABCDEFGHIJKLMNOPQRSTUVWXYZabcd_ == 30);\n\n/* 37.4:    Nested conditional inclusion: at least 8 levels.    */\n    nest = 0;\n#ifdef  A\n#else\n#   ifdef   B\n#   else\n#       ifdef   C\n#       else\n#           ifdef   D\n#           else\n#               ifdef   E\n#               else\n#                   ifdef   F\n#                   else\n#                       ifdef   G\n#                       else\n#                           ifdef   H\n#                           else\n                                nest = 8;\n#                           endif\n#                       endif\n#                   endif\n#               endif\n#           endif\n#       endif\n#   endif\n#endif\n    assert( nest == 8);\n\n/* 37.5:    Nested source file inclusion: at least 8 levels.    */\n    nest = 0;\n#include    \"nest1.h\"\n    assert( nest == 8);\n\n/* 37.6:    Parenthesized expression: at least 32 levels.   
*/\n#if     0 + (1 - (2 + (3 - (4 + (5 - (6 + (7 - (8 + (9 - (10 + (11 - (12 +  \\\n        (13 - (14 + (15 - (16 + (17 - (18 + (19 - (20 + (21 - (22 + (23 -   \\\n        (24 + (25 - (26 + (27 - (28 + (29 - (30 + (31 - (32 + 0))))))))))   \\\n        )))))))))))))))))))))) == 0\n    nest = 32;\n#endif\n    assert( nest == 32);\n\n/* 37.7:    Characters in a string (after concatenation): at least 509. */\n    {\n        char *  extremely_long_string1 =\n\"123456789012345678901234567890123456789012345678901234567890123456789\\\n0123456789012345678901234567890123456789012345678901234567890123456789\\\n0123456789012345678901234567890123456789012345678901234567890123456789\\\n0123456789012345678901234567890123456789012345678901234567890123456789\\\n0123456789012345678901234567890123456789012345678901234567890123456789\\\n0123456789012345678901234567890123456789012345678901234567890123456789\\\n0123456789012345678901234567890123456789012345678901234567890123456789\\\n012345678901234567\"\n        ;\n        assert( strlen( extremely_long_string1) == 507);\n    }\n\n/* 37.8:    Characters in a logical source line: at least 509.  */\n    {\n    int a123456789012345678901234567890 = 123450;   \\\n    int b123456789012345678901234567890 = 123451;   \\\n    int c123456789012345678901234567890 = 123452;   \\\n    int d123456789012345678901234567890 = 123453;   \\\n    int e123456789012345678901234567890 = 123454;   \\\n    int f123456789012345678901234567890 = 123455;   \\\n    int A123456789012345678901234567890 = 123456;   \\\n    int B123456789012345678901234567890 = 123457;   \\\n    int C123456789012345678901234567890 = 123458;   \\\n    int D1234567890123456789012 = 123459;\n        assert( a123456789012345678901234567890 == 123450\n            && D1234567890123456789012 == 123459);\n    }\n\n/* 37.9:    Macro definitions: at least 1024.   */\n#include    \"m1024.h\"\n    assert( ZX);\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_3_4.c",
    "content": "/* n_3_4.c: Handling of comment and <backslash><newline>.   */\n\n/* 3.4: Comment and <backslash><newline> in #error line.    */\n#error  Message of first physical line. \\\n    Message of second physical and first logical line.  /*\n    this comment splices the lines\n    */  Message of forth physical and third logical line.\n\n"
  },
  {
    "path": "tests/test-c/n_4.c",
    "content": "/* n_4.c:   Special tokens. */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n/* 4.1: Digraph spellings in directive line.    */\n%: define  stringize( a)    %: a\n\n    fputs( \"started\\n\", stderr);\n\n    assert( strcmp( stringize( abc), \"abc\") == 0);\n\n/* 4.2: Digraph spellings are retained in stringization.    */\n    assert( strcmp( stringize( <:), \"<\" \":\") == 0);\n\n    fputs( \"success\\n\", stderr);\n    return 0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_5.c",
    "content": "/* n_5.c:   Spaces or tabs are allowed at any place in pp-directive line,\n        including between the top of a pp-directive line and '#', and between\n        the '#' and the directive. */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    int     abcde = 5;\n/*  |**|[TAB]# |**|[TAB]define |**| MACRO_abcde[TAB]|**| abcde |**| */\n/**/\t# /**/\tdefine /**/ MACRO_abcde\t/**/ abcde /**/\n\n    fputs( \"started\\n\", stderr);\n\n    assert( MACRO_abcde == 5);\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_6.c",
    "content": "/* n_6.c:   #include directive. */\n\n#include    \"defs.h\"\n/* 6.1: Header-name quoted by \" and \" as well as by < and > can include\n        standard headers.   */\n/* Note: Standard headers can be included any times.    */\n#include    \"ctype.h\"\n#include    <ctype.h>\n\nmain( void)\n{\n    int     abc = 3;\n\n    fputs( \"started\\n\", stderr);\n\n    assert( isalpha( 'a'));\n\n/* 6.2: Macro is allowed in #include line.  */\n#define HEADER  \"header.h\"\n#include    HEADER\n    assert( MACRO_abc == 3);\n\n/* 6.3: With macro nonsence but legal.  */\n#undef  MACRO_abc\n#define ZERO_TOKEN\n#include    ZERO_TOKEN HEADER ZERO_TOKEN\n    assert( MACRO_abc == 3);\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_7.c",
    "content": "/* n_7.c:   #line directive.    */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n    fputs( \"started\\n\", stderr);\n\n/* 7.1: Line number and filename.   */\n#line   1234    \"cpp\"\n    assert( __LINE__ == 1234);\n    assert( strcmp( __FILE__, \"cpp\") == 0);\n\n/* 7.2: Filename argument is optional.  */\n#line   2345\n    assert( __LINE__ == 2345);\n    assert( strcmp( __FILE__, \"cpp\") == 0);\n\n/* 7.3: Argument with macro.    */\n#define LINE_AND_FILENAME   1234 \"n_7.c\"\n#line   LINE_AND_FILENAME\n    assert( __LINE__ == 1234);\n    assert( strcmp( __FILE__, \"n_7.c\") == 0);\n\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_8.c",
    "content": "/* n_8.c:   #error directive.   */\n\n/* 8.1:     Argument of #error directive is not a subject of macro expansion.\n    Output to stderr as an example:\n        Preprocessing error directive: MACRO is not a positive number.\n        from line 10 of file \"n_8.c\"\n */\n#define MACRO   0\n#if MACRO <= 0\n#error MACRO is not a positive number.\n#endif\n\n"
  },
  {
    "path": "tests/test-c/n_8_2.c",
    "content": "/* n_8_2.c:     Argument of #error is optional. */\n\n/* 8.2:     #error should be executed.  */\n#error\n\n"
  },
  {
    "path": "tests/test-c/n_9.c",
    "content": "/* n_9.c:   #pragma directive.  */\n\n#include    \"stdio.h\"\n\n/* 9.1: Any #pragma directive should be processed or ignored, should not\n        be diagnosed as an error.   */\n#pragma once\n#pragma who knows ?\n\nmain( void)\n{\n    fputs( \"started\\n\", stderr);\n    fputs( \"success\\n\", stderr);\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/n_i_.lst",
    "content": "n_1\nn_2\nn_3\nn_4\nn_5\nn_6\nn_7\nn_9\nn_10\nn_11\nn_12\nn_13\nn_13_5\nn_13_7\nn_13_8\nn_13_13\nn_15\nn_18\nn_19\nn_20\nn_21\nn_22\nn_23\nn_24\nn_25\nn_26\nn_27\nn_28\nn_29\nn_30\nn_32\nn_37\ni_32_3\ni_35\ni_35_3\n"
  },
  {
    "path": "tests/test-c/n_std.c",
    "content": "/*\n *      n_std.c\n *\n * 1998/08      made public                                     kmatsui\n * 2002/08      revised not to conflict with C99 Standard       kmatsui\n * 2004/10      added a few testcases for macro expansion       kmatsui\n *\n *   Samples to test Standard C preprocessing.\n *   This is a strictly-comforming program.\n *   Any Standard-comforming translator must translate successfully this\n * program.  The generated execution program must be executed with the message\n * <End of \"n_std.c\"> on stdout and no other messages on stderr.\n *   A translator must process also #error directive properly, which is not\n * included here because the directive might cause translator to terminate.\n */\n\n\n#include    \"defs.h\"\n\n#define ZERO_TOKEN\n#define TWO_ARGS        a,b\n#define MACRO_0         0\n#define MACRO_1         1\n#define sub( x, y)      (x - y)\n#define str( a)         # a\n#define xstr( a)        str( a)\n#define glue( a, b)     a ## b\n#define xglue( a, b)    glue( a, b)\n\nvoid    n_1( void);\nvoid    n_2( void);\nvoid    n_3( void);\nvoid    n_4( void);\nvoid    n_5( void);\nvoid    n_6( void);\nvoid    n_7( void);\nvoid    n_9( void);\nvoid    n_10( void);\nvoid    n_11( void);\nvoid    n_12( void);\nvoid    n_13( void);\nvoid    n_13_5( void);\nvoid    n_13_7( void);\nvoid    n_13_8( void);\nvoid    n_13_13( void);\nvoid    n_15( void);\nvoid    n_18( void);\nvoid    n_19( void);\nvoid    n_20( void);\nvoid    n_21( void);\nvoid    n_22( void);\nvoid    n_23( void);\nvoid    n_24( void);\nvoid    n_25( void);\nvoid    n_26( void);\nvoid    n_27( void);\nvoid    n_28( void);\nvoid    n_29( void);\nvoid    n_30( void);\nvoid    n_32( void);\nvoid    n_37( void);\n\nint main( void)\n{\n    //n_1();  trigraphs test removed\n    n_2();\n    n_3();\n    //n_4();  digraphs test removed\n    n_5();\n    n_6();\n    n_7();\n    n_9();\n    n_10();\n    n_11();\n    n_12();\n    n_13();\n    n_13_5();\n    n_13_7();\n    
n_13_8();\n    n_13_13();\n    n_15();\n    n_18();\n    n_19();\n    n_20();\n    n_21();\n    n_22();\n    n_23();\n    n_24();\n    n_25();\n    n_26();\n    n_27();\n    n_28();\n    n_29();\n    n_30();\n    n_32();\n    n_37();\n    puts( \"<End of \\\"n_std.c\\\">\");\n    return  0;\n}\n\nchar    quasi_trigraph[] = { '?', '?', ' ', '?', '?', '?', ' '\n            , '?', '?', '%', ' ', '?', '?', '^', ' ', '?', '#', '\\0' };\n\nvoid    n_2( void)\n/*      Line splicing by <backslash><newline> sequence. */\n{\n    int     ab = 1, cd = 2, ef = 3, abcde = 5;\n\n/* 2.1: In a #define directive line, between the parameter list and the\n        replacement text.   */\n#define FUNC( a, b, c)  \\\n        a + b + c\n    assert( FUNC( ab, cd, ef) == 6);\n\n/* 2.2: In a #define directive line, among the parameter list and among the\n        replacement text.   */\n#undef  FUNC\n#define FUNC( a, b  \\\n    , c)            \\\n    a + b           \\\n    + c\n    assert (FUNC( ab, cd, ef) == 6);\n\n/* 2.3: In a string literal.    */\n    assert (strcmp( \"abc\\\nde\", \"abcde\") == 0);\n\n/* 2.4: <backslash><newline> in midst of an identifier. */\n    assert( abc\\\nde == 5);\n\n}\n\nvoid    n_3( void)\n/*      Handling of comment.    */\n{\n    int     abcd = 4;\n\n/* 3.1: A comment is converted to one space.    */\n    assert( strcmp( str( abc/* comment */de), \"abc de\") == 0);\n\n/* 3.2: // is not a comment of C.   */\n/*    assert( strcmp( str( //), \"//\") == 0);    */\n\n/* 3.3: Comment is parsed prior to the parsing of preprocessing directive.  */\n#if     0\n    \"nonsence\"; /*\n#else\n    still in\n    comment     */\n#else\n#define MACRO_abcd  /*\n    in comment\n    */  abcd\n#endif\n    assert( MACRO_abcd == 4);\n}\n\nvoid    n_5( void)\n/*      Spaces or tabs are allowed at any place in pp-directive line,\n        including between the top of a pp-directive line and '#', and between\n        the '#' and the directive. 
*/\n{\n    int     abcde = 5;\n/*  |**|[TAB]# |**|[TAB]define |**| MACRO_abcde[TAB]|**| abcde |**| */\n/**/    # /**/  define /**/ MACRO_abcde /**/ abcde /**/\n    assert( MACRO_abcde == 5);\n}\n\n/* 6.1: Header-name quoted by \" and \" as well as by < and > can include\n        standard headers.   */\n/* Note: Standard headers can be included any times.    */\n#ifndef NO_SYSTEM_HEADERS\n#include    \"ctype.h\"\n#include    <ctype.h>\n#endif\n\nvoid    n_6( void)\n/*      #include directive. */\n{\n    int     abc = 3;\n\n    assert( isalpha( 'a'));\n\n/* 6.2: Macro is allowed in #include line.  */\n#define HEADER  \"header.h\"\n#include    HEADER\n    assert( MACRO_abc == 3);\n\n/* 6.3: With macro nonsence but legal.  */\n#undef  MACRO_abc\n#include    ZERO_TOKEN HEADER ZERO_TOKEN\n    assert( MACRO_abc == 3);\n}\n\nvoid    n_7( void)\n/*      #line directive.    */\n{\n/* 7.1: Line number and filename.   */\n#line   1234    \"cpp\"\n    assert( __LINE__ == 1234);\n    assert( strcmp( __FILE__, \"cpp\") == 0);\n\n/* 7.2: Filename argument is optional.  */\n#line   2345\n    assert( __LINE__ == 2345);\n    assert( strcmp( __FILE__, \"cpp\") == 0);\n\n/* 7.3: Argument with macro.    */\n#define LINE_AND_FILENAME   1234 \"n_7.c\"\n#line   LINE_AND_FILENAME\n    assert( __LINE__ == 1234);\n    assert( strcmp( __FILE__, \"n_7.c\") == 0);\n}\n\n/* Restore to correct line number and filename. */\n#line   218 \"n_std.c\"\n\nvoid    n_9( void)\n/*      #pragma directive.  */\n{\n/* 9.1: Any #pragma directive should be processed or ignored, should not\n        be diagnosed as an error.   */\n#pragma once\n#pragma who knows ?\n}\n\nvoid    n_10( void)\n/*      #if, #elif, #else and #endif pp-directive.  */\n{\n/* 10.1:    */\n/* Note: an undefined identifier in #if expression is replaced to 0.    
*/\n#if     a\n    assert( a);\n#elif   MACRO_0\n    assert( MACRO_0);\n#elif   MACRO_1         /* Valid block  */\n    assert( MACRO_1);\n#else\n    assert( 0);\n#endif\n\n/* 10.2:    Comments must be processed even if in skipped #if block.    */\n/* At least tokenization of string literal and character constant is necessary\n        to process comments, e.g. /* is not a comment mark in string literal.\n */\n#ifdef  UNDEFINED\n    /* Comment  */\n    \"in literal /* is not a comment\"\n#endif\n}\n\nvoid    n_11( void)\n/*      Operator \"defined\" in #if or #elif directive.   */\n{\n    int     abc = 1, a = 0;\n\n/* 11.1:    */\n#undef  MACRO_abc\n#define MACRO_abc   abc\n#if     defined a\n    assert( a);\n#else\n    assert( MACRO_abc);\n#endif\n#if     defined (MACRO_abc)\n    assert( MACRO_abc);\n#else\n    assert( a);\n#endif\n\n/* 11.2:    \"defined\" is an unary operator whose result is 1 or 0.  */\n#if     defined MACRO_0 * 3 != 3\n    fputs( \"Bad handling of defined operator.\\n\", stderr);\n#endif\n#if     (!defined ZERO_TOKEN != 0) || (-defined ZERO_TOKEN != -1)\n    fputs( \"Bad grouping of defined, -, ! in #if expression.\\n\", stderr);\n#endif\n}\n\n#ifndef NO_SYSTEM_HEADERS\n#include    <limits.h>\n#endif\n\nvoid    n_12( void)\n/*      Integer preprocessing number token and type of #if expression.  */\n{\n/* 12.1:    */\n#if     LONG_MAX <= LONG_MIN\n    fputs( \"Bad evaluation of long.\\n\", stderr);\n#endif\n#if     LONG_MAX <= 1073741823  /* 0x3FFFFFFF   */\n    fputs( \"Bad evaluation of long.\\n\", stderr);\n#endif\n\n/* 12.2:    */\n#if     ULONG_MAX / 2 < LONG_MAX\n    fputs( \"Bad evaluation of unsigned long.\\n\", stderr);\n#endif\n\n/* 12.3:    Octal number.   */\n#if     0177777 != 65535\n    fputs( \"Bad evaluation of octal number.\\n\", stderr);\n#endif\n\n/* 12.4:    Hexadecimal number. 
*/\n#if     0Xffff != 65535 || 0XFfFf != 65535\n    fputs( \"Bad evaluation of hexadecimal number.\\n\", stderr);\n#endif\n\n/* 12.5:    Suffix 'L' or 'l'.  */\n#if     0L != 0 || 0l != 0\n    fputs( \"Bad evaluation of 'L' suffix.\\n\", stderr);\n#endif\n\n/* 12.6:    Suffix 'U' or 'u'.  */\n#if     1U != 1 || 1u != 1\n    fputs( \"Bad evaluation of 'U' suffix.\\n\", stderr);\n#endif\n\n/* 12.7:    Negative integer.   */\n#if     0 <= -1\n    fputs( \"Bad evaluation of negative number.\\n\", stderr);\n#endif\n}\n\nvoid    n_13( void)\n/*      evaluation of #if expressions.  */\n/* Valid operators are (precedence in this order) :\n    defined, (unary)+, (unary)-, ~, !,\n    *, /, %,\n    +, -,\n    <<, >>,\n    <, >, <=, >=,\n    ==, !=,\n    &,\n    ^,\n    |,\n    &&,\n    ||,\n    ? :\n */\n{\n/* 13.1:    Bit shift.  */\n#if     1 << 2 != 4 || 8 >> 1 != 4\n    fputs( \"Bad arithmetic of <<, >> operators.\\n\", stderr);\n#endif\n\n/* 13.2:    Bitwise operators.  */\n#if     (3 ^ 5) != 6 || (3 | 5) != 7 || (3 & 5) != 1\n    fputs( \"Bad arithmetic of ^, |, & operators.\\n\", stderr);\n#endif\n\n/* 13.3:    Result of ||, && operators is either of 1 or 0. */\n#if     (2 || 3) != 1 || (2 && 3) != 1 || (0 || 4) != 1 || (0 && 5) != 0\n    fputs( \"Bad arithmetic of ||, && operators.\\n\", stderr);\n#endif\n\n/* 13.4:    ?, : operator.  */\n#if     (0 ? 1 : 2) != 2\n    fputs( \"Bad arithmetic of ?: operator.\\n\", stderr);\n#endif\n}\n\nvoid    n_13_5( void)\n/*      Arithmetic conversion in #if expressions.   */\n{\n/* 13.5:    The usual arithmetic conversion is not performed on bit shift.  */\n#if     -1 << 3U > 0\n    fputs( \"Bad conversion of bit shift operands.\\n\", stderr);\n#endif\n\n/* 13.6:    Usual arithmetic conversions.   */\n#if     -1 <= 0U        /* -1 is converted to unsigned long.    
*/\n    fputs( \"Bad arithmetic conversion.\\n\", stderr);\n#endif\n\n#if     -1 * 1U <= 0\n    fputs( \"Bad arithmetic conversion.\\n\", stderr);\n#endif\n\n/* Second and third operands of conditional operator are converted to the\n        same type, thus -1 is converted to unsigned long.    */\n#if     (1 ? -1 : 0U) <= 0\n    fputs( \"Bad arithmetic conversion.\\n\", stderr);\n#endif\n}\n\nvoid    n_13_7( void)\n/*      Short-circuit evaluation of #if expression. */\n{\n/* 13.7:    10/0 or 10/MACRO_0 are never evaluated, \"divide by zero\" error\n        cannot occur.   */\n\n#if     0 && 10 / 0\n#endif\n#if     not_defined && 10 / not_defined\n#endif\n#if     MACRO_0 && 10 / MACRO_0 > 1\n#endif\n#if     MACRO_0 ? 10 / MACRO_0 : 0\n#endif\n#if     MACRO_0 == 0 || 10 / MACRO_0 > 1        /* Valid block  */\n#else\n#endif\n}\n\nvoid    n_13_8( void)\n/*      Grouping of sub-expressions in #if expression.  */\n{\n/* 13.8:    Unary operators are grouped from right to left. */\n#if     (- -1 != 1) || (!!9 != 1) || (-!+!9 != -1) || (~~1 != 1)\n    fputs( \"Bad grouping of -, +, !, ~ in #if expression.\\n\", stderr);\n#endif\n\n/* 13.9:    ?: operators are grouped from right to left.    */\n#if     (1 ? 2 ? 3 ? 3 : 2 : 1 : 0) != 3\n    fputs( \"Bad grouping of ? : in #if expression.\\n\", stderr);\n#endif\n\n/* 13.10:   Other operators are grouped from left to right. */\n#if     (15 >> 2 >> 1 != 1) || (3 << 2 << 1 != 24)\n    fputs( \"Bad grouping of >>, << in #if expression.\\n\", stderr);\n#endif\n\n/* 13.11:   Test of precedence. */\n#if     3*10/2 >> !0*2 >> !+!-9 != 1\n    fputs( \"Bad grouping of -, +, !, *, /, >> in #if expression.\\n\", stderr);\n#endif\n\n/* 13.12:   Overall test.  Grouped as:\n        ((((((+1 - -1 - ~~1 - -!0) & 6) | ((8 % 9) ^ (-2 * -2))) >> 1) == 7)\n        ? 
7 : 0) != 7\n    evaluated to FALSE.\n */\n#if     (((+1- -1-~~1- -!0&6|8%9^-2*-2)>>1)==7?7:0)!=7\n    fputs( \"Bad arithmetic of #if expression.\\n\", stderr);\n#endif\n}\n\nvoid    n_13_13( void)\n/*      #if expression with macros. */\n{\n#define and             &&\n#define or              ||\n#define not_eq          !=\n#define bitor           |\n\n/* 13.13:   With macros expanding to operators. */\n#if     (1 bitor 2) == 3 and 4 not_eq 5 or 0\n    /* #if (1 | 2) == 3 && 4 != 5 || 0  */\n#else\n    fputs(\n    \"Bad evaluation of macros expanding to operators in #if expression.\\n\"\n        , stderr);\n#endif\n\n/* 13.14:   With macros expanding to 0 token, nonsence but legal.   */\n#if     ZERO_TOKEN MACRO_1 ZERO_TOKEN > ZERO_TOKEN MACRO_0 ZERO_TOKEN\n    /* #if 1 > 0    */\n#else\n    fputs(\n    \"Bad evaluation of macros expanding to 0 token in #if expression.\\n\"\n        , stderr);\n#endif\n}\n\nvoid    n_15( void)\n/*      #ifdef, #ifndef directives. */\n{\n\n/* 15.1:    #ifdef directive.   */\n#ifdef  MACRO_1         /* Valid block  */\n    assert( MACRO_1);\n#else\n    assert( MACRO_0);\n#endif\n\n/* 15.2:    #ifndef directive.  */\n#ifndef MACRO_1\n    assert( MACRO_0);\n#else               /* Valid block  */\n    assert( MACRO_1);\n#endif\n}\n\nvoid    n_18( void)\n/*      #define directive.  */\n/* Excerpts from ISO C 6.8.3 \"Examples\".    */\n#define OBJ_LIKE        (1-1)\n#define FTN_LIKE(a)     ( a )\n{\n    int     c = 3;\n\n/* 18.1:    Definition of an object-like macro. */\n    assert( OBJ_LIKE == 0);\n#ifndef ZERO_TOKEN\n    fputs( \"Can't define macro to 0-token.\\n\", stderr);\n#endif\n\n/* 18.2:    Definition of a function-like macro.    */\n    assert( FTN_LIKE( c) == 3);\n\n/* 18.3:    Spelling in string identical to parameter is not a parameter.   */\n#define STR( n1, n2)    \"n1:n2\"\n    assert( strcmp( STR( 1, 2), \"n1:n2\") == 0);\n}\n\nvoid    n_19( void)\n/*      Valid re-definitions of macros. 
*/\n{\n    int     c = 1;\n\n/* 19.1:    */\n#define OBJ_LIKE    /* white space */  (1-1) /* other */\n\n/* 19.2:    */\n#define FTN_LIKE( a     )(  /* note the white space */  \\\n                        a  /* other stuff on this line\n                           */ )\n    assert( FTN_LIKE( c) == 1);\n}\n\nvoid    n_20( void)\n/*      Macro lexically identical to keyword. */\n{\n/* 20.1:    */\n#define float   double\n    float   fl;\n    assert( sizeof fl == sizeof (double));\n}\n\nvoid    n_21( void)\n/*      Tokenization (No preprocessing tokens are merged implicitly).   */\n{\n    int     a = 1, x = 2, y = -3;\n\n/* 21.1:    */\n#define MINUS   -\n    assert( -MINUS-a == -1);\n\n/* 21.2:    */\n#undef  sub\n#define sub( a, b)  a-b     /* '(a)-(b)' is better  */\n#define Y   -y              /* '(-y)' is better     */\n/*  x- -y   */\n    assert( sub( x, Y) == -1);\n}\n\nvoid    n_22( void)\n/*      Tokenization of preprocessing number.   */\n{\n#define EXP         1\n\n/* 22.1:    12E+EXP is a preprocessing number, EXP is not expanded. */\n    assert( strcmp( xstr( 12E+EXP), \"12E+EXP\") == 0);\n\n/* 22.2:    .2e-EXP is also a preprocessing number. */\n    assert( strcmp( xstr( .2e-EXP), \".2e-EXP\") == 0);\n\n/* 22.3:    + or - is allowed only following E or e, 12+EXP is not a\n        preprocessing number.   */\n    assert( strcmp( xstr( 12+EXP), \"12+1\") == 0);\n}\n\nvoid    n_23( void)\n/*      ## operator in macro definition.    */\n{\n    int     xy = 1;\n\n/* 23.1:    */\n    assert( glue( x, y) == 1);\n\n/* 23.2:    Generate a preprocessing number.    */\n#undef  EXP\n#define EXP     2\n    assert( xglue( .12e+, EXP) == 12.0);\n}\n\nvoid    n_24( void)\n/*      # operator in macro definition. 
*/\n{\n/* 24.1:    */\n    assert( strcmp( str( a+b), \"a+b\") == 0);\n\n/* 24.2:    White spaces between tokens of operand are converted to one space.\n */\n    assert( strcmp( str(    ab  /* comment */   +\n        cd  ), \"ab + cd\") == 0);\n\n/* 24.3:    \\ is inserted before \\ and \" in or surrounding literals and no\n        other character is inserted to anywhere.    */\n    assert( strcmp( str( '\"' + \"' \\\"\"), \"'\\\"' + \\\"' \\\\\\\"\\\"\") == 0);\n\n/* 24.4:    Line splicing by <backslash><newline> is done prior to token\n        parsing.   */\n    assert( strcmp( str( \"ab\\\nc\"), \"\\\"abc\\\"\") == 0);\n\n/* 24.5:    Token separator inserted by macro expansion should be removed.\n        (Meanwhile, tokens should not be merged.  See 21.2.)    */\n#define f(a)        a\n    assert( strcmp( xstr( x-f(y)), \"x-y\") == 0);\n}\n\nvoid    n_25( void)\n/*      Macro arguments are pre-expanded (unless the argument is an\n        operand of # or ## operator) separately, that is, are macro-replaced\n        completely prior to rescanning. */\n{\n    int     a = 1, b = 2, abc = 3, MACRO_0MACRO_1 = 2;\n\n#undef sub\n#define sub( x, y)      (x - y)\n\n/* 25.1:    \"TWO_ARGS\" is read as one argument to \"sub\", then expanded to\n        \"a,b\", then \"x\" is substituted by \"a,b\".    */\n    assert( sub( TWO_ARGS, 1) == 1);\n\n/* 25.2:    An argument pre-expanded to 0-token.    */\n    assert( sub( ZERO_TOKEN, a) == -1);\n\n/* 25.3:    \"glue( a, b)\" is pre-expanded.  */\n    assert( xglue( glue( a, b), c) == 3);\n\n/* 25.4:    Operands of ## operator are not pre-expanded.   */\n    assert( glue( MACRO_0, MACRO_1) == 2);\n\n/* 25.5:    Operand of # operator is not pre-expanded.  
*/\n    assert( strcmp( str( ZERO_TOKEN), \"ZERO_TOKEN\") == 0);\n}\n\n#undef  f\n\n#ifdef  void\nint     f( a)\n    int     a;\n{\n    return  a;\n}\n\nint     g( a)\n    int     a;\n{\n    return  a * 2;\n}\n#else\nint     f( int a)\n{\n    return  a;\n}\n\nint     g( int a)\n{\n    return  a * 2;\n}\n#endif\n\nvoid    n_26( void)\n/*      The name once replaced is not furthur replaced. */\n{\n    int     x = 1;\n    int     AB = 1;\n    int     Z[1];\n    Z[0] = 1;\n\n/* 26.1:    Directly recursive macro definition.    */\n/*  Z[0];   */\n#define Z   Z[0]\n    assert( Z == 1);\n\n/* 26.2:    Intermediately recursive macro definition.  */\n/*  AB; */\n#define AB  BA\n#define BA  AB\n    assert( AB == 1);\n\n/* 26.3:    Directly recursive function-like macro definition.  */\n/*  x + f(x);   */\n#define f(a)    a + f(a)\n    assert( f( x) == 2);\n\n/* 26.4:    Intermediately recursive function-like macro definition.    */\n/*  x + x + g( x);  */\n#define g(a)    a + h( a)\n#define h(a)    a + g( a)\n    assert( g( x) == 4);\n\n/* 26.5:    Rescanning encounters the non-replaced macro name.  */\n/*  Z[0] + f( Z[0]);    */\n    assert( f( Z) == 2);\n}\n\nvoid    n_27( void)\n/*      Rescanning of a macro raplace any macro call in the replacement\n        text after substitution of parameters by pre-expanded-arguments.  This\n        re-examination may involve the succeding sequences from the source\n        file (what a queer thing!). */\n{\n    int     a = 1, b = 2, c, m = 1, n = 2;\n\n/* 27.1:    Cascaded use of object-like macros. */\n#define NEST8   NEST7 + 8\n#define NEST7   NEST6 + 7\n#define NEST6   NEST5 + 6\n#define NEST5   NEST4 + 5\n#define NEST4   NEST3 + 4\n#define NEST3   NEST2 + 3\n#define NEST2   NEST1 + 2\n#define NEST1   1\n    assert( NEST8 == 36);\n\n/* 27.2:    Cascaded use of function-like macros.   
*/\n#define FUNC4( a, b)    FUNC3( a, b) + NEST4\n#define FUNC3( a, b)    FUNC2( a, b) + NEST3\n#define FUNC2( a, b)    FUNC1( a, b) + NEST2\n#define FUNC1( a, b)    (a) + (b)\n    assert( FUNC4( NEST1, NEST2) == 23);\n\n/* 27.3:    An identifier generated by ## operator is subject to expansion. */\n    assert( glue( MACRO_, 1) == 1);\n\n#define head            sub(\n#define math( op, a, b) op( (a), (b))\n\n/* 27.4:    'sub' as an argument of math() is not pre-expanded, since '(' is\n        missing.    */\n    assert( math( sub, a, b) == -1);\n\n/* 27.5:    Queer thing.    */\n    c = head a,b );\n    assert( c == -1);\n\n/* 27.6:    Recursive macro (the 2nd 'm' is expanded to 'n' since it is in\n        source file).   */\n#define m       n\n#define n( a)   a \n    assert( m( m) == 2);\n}\n\nvoid    n_28( void)\n/*      __FILE__, __LINE__, __DATE__, __TIME__, __STDC__ and\n        __STDC_VERSION are predefined.  */\n{\n    char *  date = __DATE__;\n\n/* 28.1:    */\n    assert( strcmp( __FILE__, \"n_std.c\") == 0);\n\n/* 28.2:    */\n    assert( __LINE__ == 751);\n\n/* 28.3:    */\n    assert( strlen( __DATE__) == 11);\n    assert( date[ 4] != '0');\n\n/* 28.4:    */\n    assert( strlen( __TIME__) == 8);\n\n/* 28.5:    */\n    assert( __STDC__);\n\n/* 28.6:    */\n    assert( __STDC_VERSION__ >= 199409L);\n\n/* 28.7:    */\n#include    \"line.h\"\n}\n\nvoid    n_29( void)\n/*      #undef directive.   */\n{\n    int     DEFINED = 1;\n\n/* 29.1:    Undefined macro is not a macro. */\n#define DEFINED\n#undef  DEFINED\n    assert( DEFINED == 1);\n\n/* 29.2:    Undefining undefined name is not an error.  */\n#undef  UNDEFINED\n}\n\nvoid    n_30( void)\n/*      Macro calls.    */\n/*  Note:   Comma separate the arguments of function-like macro call,\n        but comma between matching inner parenthesis doesn't.  
This feature\n        is tested on so many places in this suite especially on *.c samples\n        which use assert() macro, that no separete item to test this feature\n        is provided.    */\n{\n#undef  FUNC\n#define FUNC( a, b, c)      a + b + c\n\n    int     a = 1, b = 2, c = 3;\n\n/* 30.1:    A macro may cross lines.    */\n    assert\n    (\n        FUNC\n        (\n            a,\n            b,\n            c\n        )\n        == 6\n    );\n}\n\nvoid    n_32( void)\n/*      Escape sequence in character constant in #if expression.    */\n{\n/* 32.1:    Character octal escape sequence.    */\n#if     '\\123' != 83\n    fputs( \"Bad evaluation of octal escape sequence.\\n\", stderr);\n#endif\n\n/* 32.2:    Character hexadecimal escape sequence.  */\n#if     '\\x1b' != '\\033'\n    fputs( \"Bad evaluation of hexadecimal escape sequence.\\n\", stderr);\n#endif\n}\n\nvoid    n_37( void)\n/* Translation limits.  */\n{\n#define     MACRO_8     8\n\n/* 37.1:    Number of parameters in macro: at least 31. */\n#define glue31(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C,D,E)   \\\n    a##b##c##d##e##f##g##h##i##j##k##l##m##n##o##p##q##r##s##t##u##v##w##x##y##z##A##B##C##D##E\n\n    int     ABCDEFGHIJKLMNOPQRSTUVWXYZabcde = 31;\n    int     ABCDEFGHIJKLMNOPQRSTUVWXYZabcd_ = 30;\n    int     nest = 0;\n\n/* 37.2:    Number of arguments in macro call: at least 31. */\n    assert(\n        glue31( A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R\n            , S, T, U, V, W, X, Y, Z, a, b, c, d, e)\n        == 31);\n\n/* 37.3:    Significant initial characters in an internal identifier or a\n        macro name: at least 31.  */\n    assert( ABCDEFGHIJKLMNOPQRSTUVWXYZabcd_ == 30);\n\n/* 37.4:    Nested conditional inclusion: at least 8 levels.    
*/\n    nest = 0;\n#ifdef  A\n#else\n#   ifdef   B\n#   else\n#       ifdef   C\n#       else\n#           ifdef   D\n#           else\n#               ifdef   E\n#               else\n#                   ifdef   F\n#                   else\n#                       ifdef   G\n#                       else\n#                           ifdef   H\n#                           else\n                                nest = 8;\n#                           endif\n#                       endif\n#                   endif\n#               endif\n#           endif\n#       endif\n#   endif\n#endif\n    assert( nest == 8);\n\n/* 37.5:    Nested source file inclusion: at least 8 levels.    */\n    nest = 0;\n#include    \"nest1.h\"\n    assert( nest == 8);\n\n/* 37.6:    Parenthesized expression: at least 32 levels.   */\n#if     0 + (1 - (2 + (3 - (4 + (5 - (6 + (7 - (8 + (9 - (10 + (11 - (12 +  \\\n        (13 - (14 + (15 - (16 + (17 - (18 + (19 - (20 + (21 - (22 + (23 -   \\\n        (24 + (25 - (26 + (27 - (28 + (29 - (30 + (31 - (32 + 0))))))))))   \\\n        )))))))))))))))))))))) == 0\n    nest = 32;\n#endif\n    assert( nest == 32);\n\n/* 37.7:    Characters in a string (after concatenation): at least 509. */\n    {\n        char *  extremely_long_string =\n\"123456789012345678901234567890123456789012345678901234567890123456789\\\n0123456789012345678901234567890123456789012345678901234567890123456789\\\n0123456789012345678901234567890123456789012345678901234567890123456789\\\n0123456789012345678901234567890123456789012345678901234567890123456789\\\n0123456789012345678901234567890123456789012345678901234567890123456789\\\n0123456789012345678901234567890123456789012345678901234567890123456789\\\n0123456789012345678901234567890123456789012345678901234567890123456789\\\n012345678901234567\"\n        ;\n        assert( strlen( extremely_long_string) == 507);\n    }\n\n/* 37.8:    Characters in a logical source line: at least 509.  
*/\n    {\n    int a123456789012345678901234567890 = 123450;   \\\n    int b123456789012345678901234567890 = 123451;   \\\n    int c123456789012345678901234567890 = 123452;   \\\n    int d123456789012345678901234567890 = 123453;   \\\n    int e123456789012345678901234567890 = 123454;   \\\n    int f123456789012345678901234567890 = 123455;   \\\n    int A123456789012345678901234567890 = 123456;   \\\n    int B123456789012345678901234567890 = 123457;   \\\n    int C123456789012345678901234567890 = 123458;   \\\n    int D1234567890123456789012 = 123459;\n        assert( a123456789012345678901234567890 == 123450\n            && D1234567890123456789012 == 123459);\n    }\n\n/* 37.9:    Macro definitions: at least 1024.   */\n#undef  AB\n#undef  BA\n#undef  OR\n\n#include    \"m1024.h\"\n    assert( ZX);\n}\n\n"
  },
  {
    "path": "tests/test-c/nest1.h",
    "content": "/* nest1.h  */\n\n    nest = 1;\n\n#include    \"nest2.h\"\n"
  },
  {
    "path": "tests/test-c/nest10.h",
    "content": "/* nest10.h */\n#include \"nest11.h\"\n"
  },
  {
    "path": "tests/test-c/nest11.h",
    "content": "/* nest11.h */\n#include \"nest12.h\"\n"
  },
  {
    "path": "tests/test-c/nest12.h",
    "content": "/* nest12.h */\n#include \"nest13.h\"\n"
  },
  {
    "path": "tests/test-c/nest13.h",
    "content": "/* nest13.h */\n#include \"nest14.h\"\n"
  },
  {
    "path": "tests/test-c/nest14.h",
    "content": "/* nest14.h */\n#include \"nest15.h\"\n"
  },
  {
    "path": "tests/test-c/nest15.h",
    "content": "/* nest15.h */\n#ifdef  X0F\n    nest = 0x0f;\n#endif\n"
  },
  {
    "path": "tests/test-c/nest2.h",
    "content": "/* nest2.h  */\n\n    nest = 2;\n\n#include    \"nest3.h\"\n"
  },
  {
    "path": "tests/test-c/nest3.h",
    "content": "/* nest3.h  */\n\n    nest = 3;\n\n#include    \"nest4.h\"\n"
  },
  {
    "path": "tests/test-c/nest4.h",
    "content": "/* nest4.h  */\n\n    nest = 4;\n\n#include    \"nest5.h\"\n"
  },
  {
    "path": "tests/test-c/nest5.h",
    "content": "/* nest5.h  */\n\n    nest = 5;\n\n#include    \"nest6.h\"\n"
  },
  {
    "path": "tests/test-c/nest6.h",
    "content": "/* nest6.h  */\n\n    nest = 6;\n\n#include    \"nest7.h\"\n"
  },
  {
    "path": "tests/test-c/nest7.h",
    "content": "/* nest7.h  */\n\n    nest = 7;\n\n#include    \"nest8.h\"\n"
  },
  {
    "path": "tests/test-c/nest8.h",
    "content": "/* nest8.h  */\n\n#ifndef X0F\n    nest = 8;\n#else\n#include    \"nest9.h\"\n#endif\n\n"
  },
  {
    "path": "tests/test-c/nest9.h",
    "content": "/* nest9.h */\n#include \"nest10.h\"\n"
  },
  {
    "path": "tests/test-c/side_cpp",
    "content": "                                        \n                                        \n                                        \n                                        \n                                        \n                                        \n                                        \n                                        \n\nn_1:    Trigraph sequences.             \nn_2:    Line splicing by <backslash>.   \nn_3:    Handling of comment.            \nn_4:    Tokens spelled by digraphs.     \nn_5:    Spaces or tabs in pp-directive. \nn_6:    #include directive.             \nn_7:    #line directive.                \nn_9:    #pragma directive.              \nn_10:   #if, #elif pp-directive.        \nn_11:   Operator \"defined\" in #if.      \nn_12:   Pp-number and type of #if expr. \nn_13:   Valid operators in #if expr.    \nn_13.5: Usual arithmetic conversion.    \nn_13.7: Short-circuit evaluation of #if.\nn_13.8: Grouping of #if sub-expressions.\nn_13.13:    #if expression with macros. \nn_15:   #ifdef, #ifndef directives.     \nn_18:   #define directive.              \nn_19:   Valid re-definitions of macros. \nn_20:   Macro name identical to keyword.\nn_21:   Tokenization (no token merging).\nn_22:   Tokenization of pp-number.      \nn_23:   ## operator in macro definition.\nn_24:   # operator in macro definition. \nn_25:   Pre-expansion of macro args.    \nn_26:   No recursive replacement.       \nn_27:   Rescanning of a macro.          \nn_28:   Standard pre-defined macros.    \nn_29:   #undef directive.               \nn_30:   Macro call crossing lines.      \nn_32:   Escape sequence in char-const.  \nn_37:   Translation limits.             \ni_32.3: Character constant in #if.      \ni_35:   Multi-character character const.\ni_35.3: Multi-character wide character. \n\nTotal number of tests unpassed.         \n"
  },
  {
    "path": "tests/test-c/u_1_1.c",
    "content": "/* u_1_1.c:     Undefined behaviors on unterminated line, comment or macro. */\n\nmain( void)\n{\n\n/* u.1.1:   End of a source file without <newline>. */\n#include    \"unbal3.h\"\nint e_1;\n\n/* u.1.2:   End of a source file with <backslash><newline>. */\n#include    \"unbal4.h\"\n;\n\n/* u.1.3:   End of a source file with an unterminated comment.  */\n#include    \"unbal5.h\"\n*/\n\n/* u.1.4:   End of a source file with an uncompleted macro call.    */\n#include    \"unbal6.h\"\n    y);\n\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_11.c",
    "content": "/* u_1_11.c:    Undefined behaviors on undefined #include syntax or header-\n        name.   */\n\n/* u.1.11:  Header-name containing ', \", \\ or \"/*\". */\n/*  Probably illegal filename and fails to open.    */\n#include    \"../*line.h\"\n\nmain( void)\n{\n/*  \\ is a legal path-delimiter in MS-DOS or some other OS's.   */\n#include    \"..\\test-t\\line.h\"\n\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_12.c",
    "content": "/* u_1_12.c:    Undefined behaviors on undefined #include syntax or header-\n        name.   */\n\n/* u.1.12:  Argument of #include other than header-name.    */\n#include    filename\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_13.c",
    "content": "/* u_1_13.c:    Undefined behaviors on undefined #include syntax or header-\n        name.   */\n\n/* u.1.13:  Excessive argument in #include directive.   */\n#include    <assert.h>  Junk\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_14.c",
    "content": "/* u_1_14.c:    Undefined behaviors on undefined #line syntax.  */\n\nmain( void)\n{\n\n/* u.1.14:  #line directive without an argument of line number. */\n#line   \"filename\"\n\n/* u.1.15:  #line directive with the second argument of other than string\n    literal.    */\n#line   1234    filename\n\n/* u.1.16:  Excessive argument in #line directive.  */\n#line   2345    \"filename\"  Junk\n\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_17.c",
    "content": "/* u_1_17.c:    Undefined behaviors on out-of-range #line number.   */\n\n#include    <stdio.h>\n\nmain( void)\n{\n\n/* u.1.17:  Line number argument of #line directive should be in range of\n        [1,32767].  */\n#line   32767   /* valid here   */\n/* line 32767   */\n/* line 32768 ? : out of range  */\n    printf( \"%d\\n\", __LINE__);\n                    /* 32769 ? or -32767 ?, maybe warned as an out-of-range */\n#line   0\n#line   32768\n\n/* u.1.18:  Line number argument of #line directive should be written in\n        decimal digits. */\n#line   0x1000\n\n/*  23, u_1_17.c or other undefined results.    */\n    printf( \"%d, %s\\n\", __LINE__, __FILE__);\n\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_19.c",
    "content": "/* u_1_19.c:    Undefined behaviors on undefined #define and #undef syntax. */\n\n#include    <stdio.h>\n\nmain( void)\n{\n\n/* u.1.19:  A macro expanded to \"defined\" in #if expression.    */\n#define DEFINED     defined\n#if     DEFINED DEFINED\n#endif\n\n#undef  __linux__\n#undef  __arm__\n#define __linux__   1\n#define HAVE_MREMAP defined(__linux__) && !defined(__arm__)\n/* Wrong macro definition.\n * This macro should be defined as follows.\n *  #if defined(__linux__) && !defined(__arm__)\n *  #define HAVE_MREMAP 1\n *  #endif\n */\n#if HAVE_MREMAP\n    mremap();\n#endif\n\n/* u.1.20:  Undefining __FILE__, __LINE__, __DATE__, __TIME__, __STDC__ or\n        \"defined\" in #undef directive.  */\n#undef  __LINE__\n/*  31 or other undefined result.   */\n    printf( \"%d\\n\", __LINE__);\n\n/* u.1.21:  Defining __FILE__, __LINE__, __DATE__, __TIME__, __STDC__ or\n        \"defined\" in #define directive. */\n#define __LINE__    1234\n/*  37 or other undefined result.   */\n    printf( \"%d\\n\", __LINE__);\n#define defined     defined\n#if     defined defined\n#   error   I am not a good preprocessor.\n#endif\n\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_22.c",
    "content": "/* u_1_22.c:    Undefined behaviors on generating invalid pp-token by ##\n        operator.   */\n\n#include    <stdio.h>\n#define str( a)     # a\n\nmain( void)\n{\n\n/* u.1.22:  Result of ## operator is not a valid pp-token.  */\n#define NUM( dig, exp)  dig ## E+ ## exp\n/*\n *   \"E+\" is a sequence of two pp-tokens \"E\" and \"+\", not a single pp-token.\n * The first ## concatenates the last pp-token of first argument with \"E\",\n * and the second ## concatenates \"+\" with the first pp-token of the second\n * argument.\n *   While \"12E\" (one of the sequence generated by the token concatenation)\n * is a valid pp-token, \"+34\" (the another sequence) is not a valid pp-token\n * and causes an undefined behavior.\n */\n    printf( \"%e\\n\", NUM( 12, 34));\n\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_23.c",
    "content": "/* u_1_23.c:    Undefined behaviors on generating invalid pp-token by #\n        operator.   */\n\n#include    <stdio.h>\n#define str( a)     # a\n\nmain( void)\n{\n\n/*  \"\\\\\"\\\"\";    This sequence is parsed to three tokens \"\\\\\" \\ \"\", and will be\n        diagnosed by compiler-proper unless diagnosed by preprocessor.  */\n    puts( str( \\\"\"));\n\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_24.c",
    "content": "/* u_1_24.c:    Undefined behaviors on empty argument of macro call.    */\n\n/* u.1.24:  Empty argument of macro call.   */\n/*\n *   Note: Since no argument and one empty argument cannot be distinguished\n * syntactically, additional dummy argument may be necessary for an\n * intermediate macro to process one empty argument (if possible).\n */\n\n#include    <stdio.h>\n\n#define ARG( a, dummy)      # a\n#define EMPTY\n#define SHOWN( n)       printf( \"%s : %d\\n\", # n, n)\n#define SHOWS( s)       printf( \"%s : %s\\n\", # s, ARG( s, dummy))\n#define add( a, b)      (a + b)\n#define sub( a, b)      (a - b)\n#define math( op, a, b)     op( a, b)\n#define APPEND( a, b)       a ## b\n\nmain( void)\n{\n    int     x = 1;\n    int     y = 2;\n\n/*  printf( \"%s : %d\\n\", \"math( sub, , y)\", ( - y));\n        or other undefined behavior.    */\n    SHOWN( math( sub, , y));\n\n/*  printf( \"%s : %s\\n\", \"EMPTY\", \"\");\n        or other undefined behavior.    */\n    SHOWS( EMPTY);\n\n/*  printf( \"%s : %s\\n\", \"APPEND( CON, 1)\", \"CON1\");    */\n    SHOWS( APPEND( CON, 1));\n\n/*  printf( \"%s : %s\\n\", \"APPEND( CON, )\", \"CON\");\n        or other undefined behavior.    */\n    SHOWS( APPEND( CON, ));\n\n/*  printf( \"%s : %s\\n\", \"APPEND( , )\", \"\");\n        or other undefined behavior.    */\n    SHOWS( APPEND( , ));\n\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_25.c",
    "content": "/* u_1_25.c:    Undefined behaviors on undefined macro argument.    */\n\n#include    <stdio.h>\n#define str( a)     # a\n#define sub( x, y)  (x - y)\n#define SUB         sub\n\nmain( void)\n{\n    int     a = 1, b = 2;\n\n/* u.1.25:  Macro argument otherwise parsed as a directive. */\n/*  \"#define NAME\"; or other undefined behaviour.   */\n    puts( str(\n#define NAME\n    ));\n\n#if 0   /* Added by C90: Corrigendum 1 (1994) and deleted by C99    */\n/* u.1.26:  Expanded macro replacement list end with name of function-like\n        macro.  */\n    SUB( a, b);\n#endif\n\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_27.c",
    "content": "/* u_1_27.c:    Pseudo-directive-line.  */\n\n/* u.1.27:  Unknown preprocessing directive (other than #pragma).   */\n#ifdefined MACRO\n#endif              /* The second error.    */\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_28.c",
    "content": "/* u_1_28.c:    Macro expanding to name identical to directive. */\n\n#define D   define\n/* u.1.28:  There are following two ways of preprocessing.\n    1:  \"D\" isn't expanded, because # is the first token of the line.\n        Preprocessor reports that \"D\" is an unknown directive.\n    2:  \"D\" is expanded, because that is not a directive.\n        Compiler-phase will diagnose the output of preprocess.\n    Anyway, preprocessor should not interprete this line as a preprocessing\n    directive.  */\n#D  A   B\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_5.c",
    "content": "/* u_1_5.c:     Undefined behaviors on illegal characters.  */\n\n/* u.1.5:   Illegal characters (in other than string literal, character\n        constant, header-name or comment).  */\n#if     1 ||\u001e2\u001f\n/*    0x01e ^ ^ 0x1f    */\n#endif  /* Maybe the second error.  */\n\n/* u.1.6:   [VT], [FF] in directive line.   */\n#if     1 ||\u000b2\f\n/*     [VT] ^ ^ [FF]    */\n#endif  /* Maybe the second error.  */\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_7_big5.c",
    "content": "/* u_1_7_big5.c:    Invalid multi-byte character sequence (in string literal,\n        character constant, header-name or comment).    */\n\n#define str( a)     # a\n#pragma setlocale( \"chinese-traditional\")   /* For Visual C */\n#pragma __setlocale( \"big5\")                /* For MCPP     */\n\nmain( void)\n{\n    char *  cp = str( \"\");  /* 0xa181   */\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_7_eucjp.c",
    "content": "/* u_1_7_eucjp.c:   Invalid multi-byte character sequence (in string literal,\n        character constant, header-name or comment).    */\n\n#define str( a)     # a\n#pragma __setlocale( \"eucjp\")               /* For MCPP     */\n\nmain( void)\n{\n    char *  cp = str( \"\");  /* 0xb1a0   */\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_7_gb.c",
    "content": "/* u_1_7_gb.c:  Invalid multi-byte character sequence (in string literal,\n        character constant, header-name or comment).    */\n\n#define str( a)     # a\n#pragma setlocale( \"chinese-simplified\")    /* For Visual C */\n#pragma __setlocale( \"gb2312\")              /* For MCPP     */\n\nmain( void)\n{\n    char *  cp = str( \"\");  /* 0xb1a0   */\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_7_jis.c",
    "content": "/* u_1_7_jis.c: Invalid multi-byte character sequence (in string literal,\n        character constant, header-name or comment).    */\n\n#define str( a)     # a\n#pragma __setlocale( \"jis\")                 /* For MCPP     */\n\nmain( void)\n{\n    char *  cp = str( \"\u001b$B1 \u001b(B\");  /* 0x3120   */\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_7_ksc.c",
    "content": "/* u_1_7_ksc.c: Invalid multi-byte character sequence (in string literal,\n        character constant, header-name or comment).    */\n\n#define str( a)     # a\n#pragma setlocale( \"korean\")                /* For Visual C */\n#pragma __setlocale( \"ksc5601\")             /* For MCPP     */\n\nmain( void)\n{\n    char *  cp = str( \"\");  /* 0xb1a0   */\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_7_sjis.c",
    "content": "/* u_1_7_sjis.c:    Invalid multi-byte character sequence (in string literal,\n        character constant, header-name or comment).    */\n\n#define str( a)     # a\n#pragma setlocale( \"japanese\")              /* For Visual C */\n#pragma __setlocale( \"sjis\")                /* For MCPP     */\n\nmain( void)\n{\n    char *  cp = str( \"8\");  /* 0x9138   */\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_7_utf8.c",
    "content": "/* u_1_7_utf8.c:    Invalid multi-byte character sequence (in string literal,\n        character constant, header-name or comment).    */\n\n#define str( a)     # a\n#pragma __setlocale( \"utf8\")                /* For MCPP     */\n\nmain( void)\n{\n    char *  cp = str( \"字\");   /* 0xe5ad97 : legal */\n    char *  ecp1 = str( \"\");   /* 0xc0af   : overlong  */\n    char *  ecp2 = str( \"\");   /* 0xe09fbf : overlong  */\n    char *  ecp3 = str( \"\");   /* 0xeda080 : UTF-16 surrogate  */\n\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/u_1_8.c",
    "content": "/* u_1_8.c:     Undefined behaviors on unterminated quotations. */\n\n/* u.1.8:   Unterminated character constant.    */\n/*  The following \"comment\" may not interpreted as a comment but swallowed by\n        the unterminated character constant.    */\n#error  I can't understand. /* Token error prior to execution of #error.    */\n\nmain( void)\n{\n/* u.1.9:   Unterminated string literal.    */\n    char *  string =\n    \"String literal\n    across the lines.\n\"\n;\n    return  0;\n}\n\n/* u.1.10:  Unterminated header-name.   */\n#include    <assert.h\n\n"
  },
  {
    "path": "tests/test-c/u_2.c",
    "content": "/* u_2.c:   Undefined behaviors on undefined constant expression.   */\n\n/* u.2.1:   Undefined escape sequence.  */\n#if     '\\x'\n#endif\n\n/* u.2.2:   Illegal bit shift count.    */\n#if     1 << -1\n#endif\n#if     1 << 64\n#endif\n\nmain( void)\n{\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/unbal1.h",
    "content": "/* unbal1.h */\n#endif\n"
  },
  {
    "path": "tests/test-c/unbal2.h",
    "content": "/* unbal2.h */\n\n#define UNBAL2  1\n\n#if     UNBAL2      /* line 5   */\n#else\n"
  },
  {
    "path": "tests/test-c/unbal3.h",
    "content": "/* unbal3.h */\nint unbal3;"
  },
  {
    "path": "tests/test-c/unbal4.h",
    "content": "/* unbal4.h */\nint unbal4\\\n"
  },
  {
    "path": "tests/test-c/unbal5.h",
    "content": "/* unbal5.h */\nint unbal5;     /* unterminated comment\n"
  },
  {
    "path": "tests/test-c/unbal6.h",
    "content": "/* unbal6.h */\n#define UNBAL6( a, b)    ((a) + (b))\n    UNBAL6\n        (\n        x\n        ,\n"
  },
  {
    "path": "tests/test-c/undefs.c",
    "content": "/*\n *  undefs.c:\n * 1998/08          made public                                 kmatsui\n * 2002/12          slightly revised                            kmatsui\n * 2003/11          slightly revised                            kmatsui\n *\n *   Each of the following texts causes undefined behavior.  Good pre-\n * processor will diagnose or warn at these texts, or at least document the\n * behavior on these cases.\n */\n\n#include    <stdio.h>\n\n#define str( a)     # a\n\nmain( void)\n{\n    int     a = 1, b = 2;\n    char *  string;\n\n/* u.1.1:   End of a source file without <newline>. */\n#include    \"unbal3.h\"\nint e_1;\n\n/* u.1.2:   End of a source file with <backslash><newline>. */\n#include    \"unbal4.h\"\n;\n\n/* u.1.3:   End of a source file with an unterminated comment.  */\n#include    \"unbal5.h\"\n*/\n\n/* u.1.4:   End of a source file with an uncompleted macro call.    */\n#include    \"unbal6.h\"\n    y);\n\n/* u.1.5:   Illegal characters (in other than string literal, character\n        constant, header-name or comment).  */\n#if     1 ||\u001e2\u001f\n/*    0x01e ^ ^ 0x1f    */\n#endif  /* Maybe the second error.  */\n\n/* u.1.6:   [VT], [FF] in directive line.   */\n#if     1 ||\u000b2\f\n/*     [VT] ^ ^ [FF]    */\n#endif  /* Maybe the second error.  */\n\n/* u.1.7:   Invalid multi-byte character sequence (in string literal,\n        character constant, header-name or comment).    */\n#if     '== 0x8e\n/*       ^ 0x8e */\n#endif  /* Maybe the second error.  */\n\n/* u.1.8:   Unterminated character constant.    */\n/*  The following \"comment\" may not interpreted as a comment but swallowed by\n        the unterminated character constant.    */\n#error  I can't understand. /* Token error prior to execution of #error.    */\n\n/* u.1.9:   Unterminated string literal.    */\n    string =\n    \"String literal\n    across the lines.\n\"\n;\n\n/* u.1.10:  Unterminated header-name.   
*/\n#include    <assert.h\n\n/* u.1.11:  Header-name containing ', \", \\ or \"/*\". */\n/*  Probably illegal filename and fails to open.    */\n#include    \"../*line.h\"\n/*  \\ is a legal path-delimiter in MS-DOS or some other OS's.   */\n#include    \"..\\test-c\\line.h\"\n\n/* u.1.12:  Argument of #include other than header-name.    */\n#include    filename\n\n/* u.1.13:  Excessive argument in #include directive.   */\n#include    <assert.h>  Junk\n\n/* u.1.14:  #line directive without an argument of line number. */\n#line   \"filename\"\n\n/* u.1.15:  #line directive with the second argument of other than string\n        literal.    */\n#line   1234    filename\n\n/* u.1.16:  Excessive argument in #line directive.  */\n#line   2345    \"filename\"  Junk\n\n/*  90, undefs.c or other undefined results.    */\n    printf( \"%d, %s\\n\", __LINE__, __FILE__);\n\n/* u.1.17:  Line number argument of #line directive should be in range of\n        [1,32767].  */\n#line   32767   /* valid here   */\n/* line 32767   */\n/* line 32768 ? : out of range  */\n    printf( \"%d\\n\", __LINE__);\n                    /* 32769 ? or -32767 ?, maybe warned as an out-of-range */\n#line   0\n#line   32768\n\n/* u.1.18:  Line number argument of #line directive should be written in\n        decimal digits. */\n#line   0x1000\n\n#line   108     /* Restore to correct line number.  */\n\n/* u.1.19:  A macro expanded to \"defined\" in #if expression.    */\n#define DEFINED     defined\n#if     DEFINED DEFINED\n#endif\n\n#undef  __linux__\n#undef  __arm__\n#define __linux__   1\n#define HAVE_MREMAP defined(__linux__) && !defined(__arm__)\n/* Wrong macro definition.\n * This macro should be defined as follows.\n *  #if defined(__linux__) && !defined(__arm__)\n *  #define HAVE_MREMAP 1\n *  #endif\n */\n#if HAVE_MREMAP\n    mremap();\n#endif\n\n/* u.1.20:  Undefining __FILE__, __LINE__, __DATE__, __TIME__, __STDC__,\n        __STDC_VERSION__ or \"defined\" in #undef directive.  
*/\n#undef  __LINE__\n/*  131 or other undefined result.  */\n    printf( \"%d\\n\", __LINE__);\n\n/* u.1.21:  Defining __FILE__, __LINE__, __DATE__, __TIME__, __STDC__,\n        __STDC_VERSION__ or \"defined\" in #define directive. */\n#define __LINE__    1234\n/*  137 or other undefined result.  */\n    printf( \"%d\\n\", __LINE__);\n#define defined     defined\n#if     defined defined\n#   error   I am not a good preprocessor.\n#endif\n\n/* u.1.22:  Result of ## operator is not a valid pp-token.  */\n#define NUM( dig, exp)  dig ## E+ ## exp\n/*\n *   \"E+\" is a sequence of two pp-tokens \"E\" and \"+\", not a single pp-token.\n * The first ## concatenates the last pp-token of first argument with \"E\",\n * and the second ## concatenates \"+\" with the first pp-token of the second\n * argument.\n *   While \"12E\" (one of the sequence generated by the token concatenation)\n * is a valid pp-token, \"+34\" (the another sequence) is not a valid pp-token\n * and causes an undefined behavior.\n */\n    printf( \"%e\\n\", NUM( 12, 34));\n\n/* u.1.23:  Result of # operator is not a valid string literal. */\n/*  \"\\\\\"\\\"\";    This sequence is parsed to three tokens \"\\\\\" \\ \"\", and will be\n        diagnosed by compiler-proper unless diagnosed by preprocessor.  */\n    puts( str( \\\"\"));\n\n/* u.1.24:  Empty argument of macro call.   
*/\n/*\n *   Note: Since no argument and one empty argument cannot be distinguished\n * syntactically, additional dummy argument may be necessary for an\n * intermediate macro to process one empty argument (if possible).\n */\n\n#define ARG( a, dummy)      # a\n#define EMPTY\n#define SHOWN( n)       printf( \"%s : %d\\n\", # n, n)\n#define SHOWS( s)       printf( \"%s : %s\\n\", # s, ARG( s, dummy))\n#define add( a, b)      (a + b)\n#define sub( a, b)      (a - b)\n#define math( op, a, b)     op( a, b)\n#define APPEND( a, b)       a ## b\n\n/*  printf( \"%s : %d\\n\", \"math( sub, , y)\", ( - y));\n        or other undefined behavior.    */\n    SHOWN( math( sub, , y));\n\n/*  printf( \"%s : %s\\n\", \"EMPTY\", \"\");\n        or other undefined behavior.    */\n    SHOWS( EMPTY);\n\n/*  printf( \"%s : %s\\n\", \"APPEND( CON, 1)\", \"CON1\");    */\n    SHOWS( APPEND( CON, 1));\n\n/*  printf( \"%s : %s\\n\", \"APPEND( CON, )\", \"CON\");\n        or other undefined behavior.    */\n    SHOWS( APPEND( CON, ));\n\n/*  printf( \"%s : %s\\n\", \"APPEND( , )\", \"\");\n        or other undefined behavior.    */\n    SHOWS( APPEND( , ));\n\n/* u.1.25:  Macro argument otherwise parsed as a directive. */\n/*  \"#define NAME\"; or other undefined behaviour.   */\n    puts( str(\n#define NAME\n    ));\n\n#define sub( x, y)      (x - y)\n#define SUB             sub\n\n#if 0   /* Added by C90: Corrigendum 1 (1994) and deleted by C99    */\n/* u.1.26:  Expanded macro replacement list end with name of function-like\n        macro.  */\n    SUB( a, b);\n#endif\n\n/* u.1.27:  Unknown preprocessing directive (other than #pragma).   */\n#ifdefined MACRO\n#endif              /* The second error.    
*/\n\n#define D   define\n/* u.1.28:  There are following two ways of preprocessing.\n    1:  \"D\" isn't expanded, because # is the first token of the line.\n        Preprocessor reports that \"D\" is an unknown directive.\n    2:  \"D\" is expanded, because that is not a directive.\n        Compiler-phase will diagnose the output of preprocess.\n    Anyway, preprocessor should not interprete this line as a preprocessing\n    directive.\n */\n#D  A   B\n\n/* u.2.1:   Undefined escape sequence.  */\n#if     '\\x'\n#endif\n\n/* u.2.2:   Illegal bit shift count.    */\n#if     1 << -1\n#endif\n#if     1 << 64\n#endif\n\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/unspcs.c",
    "content": "/*\n *  unspcs.c:\n * 1998/08      made public                                     kmatsui\n *\n *   These texts are unportable ones, because the order of the evaluation is\n * unspecified.  Good preprocessor will warn at these texts even if the\n * results are valid.  Good preprocessor will also document the order of\n * evaluation and the behavior on invalid results.\n *   Note: Order of evaluation of sub-expressions (other than operands of &&,\n * ||, ? :) of #if expression is also unspecified.  The order, however, never\n * affects the result, because #if expression never cause side effect, so no\n * warning is necessary.  Precedence and grouping rules of operators are other\n * things than order of evaluation, and shall be obeyed by preprocessor.\n */\n\n#include    \"defs.h\"\n\n#define str( a)     # a\n#define xstr( a)    str( a)\n\nmain( void)\n{\n/* s.1.1:   Order of evaluation of #, ## operators. */\n#define MAKWIDESTR( s)  L ## # s\n/*  Either of L\"name\"; or L# name; (\"L#\" is not a valid pp-token).  */\n    assert( MAKWIDESTR( name)[ 0] == L'n');\n\n/* s.1.2:   Order of evaluation of ## operators.    */\n#define glue3( a, b, c)     a ## b ## c\n/*  \"1.a\" or undefined, since .a is not a valid pp-token, while 1. and 1.a are\n        valid pp-tokens.    */\n    puts( xstr( glue3( 1, ., a)));\n\n    return  0;\n}\n\n"
  },
  {
    "path": "tests/test-c/warns.c",
    "content": "/*\n *  warns.c:\n * 1998/08      made public                                     kmatsui\n * 2002/12      slightly modified                               kmatsui\n * 2003/11      added a few samples                             kmatsui\n */\n\n/*\n *   The following texts are legal but suspicious ones.  Good preprocessor\n * will warn at these texts.\n */\n\n#include    \"defs.h\"\n\nmain( void)\n{\n/* w.1.1:   \"/*\" in comment.    */\n/*  comment /*  nested comment and no closing   */\n\n/* w.1.2:   Rescanning of replacement text involves succeding text. */\n#define sub( x, y)      (x - y)\n#define head            sub(\n    int     a = 1, b = 2, c;\n    c = head a,b );\n    assert( c == -1);\n\n#define OBJECT_LIKE     FUNCTION_LIKE\n#define FUNCTION_LIKE( x, y)    (x + y)\n    c = OBJECT_LIKE( a, b);\n    assert( c == 3);\n\n/* w.2.1:   Negative number converted to positive in #if expression.    */\n#if     -1 < 0U\n#endif\n\n/* w.2.2:   Out of range of unsigned type (wraps around and never overflow)\n        in #if expression.  */\n#if     0U - 1\n#endif\n\n/*\n *   The following texts are legal but non-portable ones, since these requires\n * translation limits greater than the minima quaranteed by C90.  Good\n * preprocessor will warn at these texts (at least when user wants), unless\n * it diagnose these as errors.\n */\n\n    {\n    int     nest;\n    int     A0B0C0D0E0F0G0H0O0O1O2O3P0P1P2 = 123;\n\n/* w.3.1:   Number of parameters in macro: more than 31.    */\n#define glue63(    \\\n    a0, b0, c0, d0, e0, f0, g0, h0, i0, j0, k0, l0, m0, n0, o0, p0, \\\n    a1, b1, c1, d1, e1, f1, g1, h1, i1, j1, k1, l1, m1, n1, o1, p1, \\\n    a2, b2, c2, d2, e2, f2, g2, h2, i2, j2, k2, l2, m2, n2, o2, p2, \\\n    a3, b3, c3, d3, e3, f3, g3, h3, i3, j3, k3, l3, m3, n3, o3)     \\\n    a0 ## b0 ## c0 ## d0 ## e0 ## f0 ## g0 ## h0 ## \\\n    o0 ## o1 ## o2 ## o3 ## p0 ## p1 ## p2\n\n/* w.3.2:   Number of arguments in macro call: more than 31.    
*/\n    assert(\n        glue63(\n            A0, B0, C0, D0, E0, F0, G0, H0, I0, J0, K0, L0, M0, N0, O0, P0,\n            A1, B1, C1, D1, E1, F1, G1, H1, I1, J1, K1, L1, M1, N1, O1, P1,\n            A2, B2, C2, D2, E2, F2, G2, H2, I2, J2, K2, L2, M2, N2, O2, P2,\n            A3, B3, C3, D3, E3, F3, G3, H3, I3, J3, K3, L3, M3, N3, O3)\n        == 123);\n\n/* w.3.3:   Initial significant characters in an identifier: more than 31.  */\n        {\n    int A23456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef = 63;\n    assert(\n        A23456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\n        == 63);\n        }\n\n/* w.3.4:   Nested conditional inclusion: more than 8 levels.   */\n    nest = 0;\n#define X0F\n#include    \"ifdef15.h\"\n    assert( nest == 0x0f);\n\n/* w.3.5:   Nested source file inclusion: more than 8 levels.   */\n    nest = 0;\n#define X0F\n#include    \"nest1.h\"\n    assert( nest == 0x0f);\n\n/* w.3.6:   Parenthesized expression: more than 32 levels.  */\n    nest = 0;\n#if \\\n        (0x00 + (0x01 - (0x02 + (0x03 - (0x04 + (0x05 - (0x06 + (0x07 - \\\n        (0x08 + (0x09 - (0x0A + (0x0B - (0x0C + (0x0D - (0x0E + (0x0F - \\\n        (0x10 + (0x11 - (0x12 + (0x13 - (0x14 + (0x15 - (0x16 + (0x17 - \\\n        (0x18 + (0x19 - (0x1A + (0x1B - (0x1C + (0x1D - (0x1E + (0x1F - \\\n        (0x20 + (0x21 - (0x22 + (0x23 - (0x24 + (0x25 - (0x26 + (0x27 - \\\n        (0x28 + (0x29 - (0x2A + (0x2B - (0x2C + (0x2D - (0x2E + (0x2F - \\\n        (0x30 + (0x31 - (0x32 + (0x33 - (0x34 + (0x35 - (0x36 + (0x37 - \\\n        (0x38 + (0x39 - (0x3A + (0x3B - (0x3C + (0x3D - 0x3E)           \\\n        )))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))   \\\n        == -1\n    nest = 63;\n#endif\n    assert( nest == 63);\n    }\n\n/* w.3.7:   Characters in a string (after concatenation): more than 509.    
*/\n    {\n    char    *string1023 =\n\"123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\\\n1123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\\\n2123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\\\n3123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\\\n4123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\\\n5123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\\\n6123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\\\n7123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\\\n8123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\\\n9123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\\\na123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\\\nb123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\\\nc123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\\\nd123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\\\ne123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\\\nf123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\"\n;\n    assert( strlen( string1023) == 1023);\n    }\n\n/* w.3.8:   Characters in a logical source line: more than 509. 
*/\n    {\n    int a123456789012345678901234567890 = 123450;   \\\n    int b123456789012345678901234567890 = 123451;   \\\n    int c123456789012345678901234567890 = 123452;   \\\n    int d123456789012345678901234567890 = 123453;   \\\n    int e123456789012345678901234567890 = 123454;   \\\n    int f123456789012345678901234567890 = 123455;   \\\n    int g123456789012345678901234567890 = 123456;   \\\n    int h123456789012345678901234567890 = 123457;   \\\n    int i123456789012345678901234567890 = 123458;   \\\n    int j123456789012345678901234567890 = 123459;   \\\n    int k123456789012345678901234567890 = 123460;   \\\n    int l123456789012345678901234567890 = 123461;   \\\n    int m123456789012345678901234567890 = 123462;   \\\n    int n123456789012345678901234567890 = 123463;   \\\n    int o123456789012345678901234567890 = 123464;   \\\n    int p123456789012345678901234567890 = 123465;   \\\n    int q123456789012345678901234567890 = 123466;   \\\n    int r123456789012345678901234567890 = 123467;   \\\n    int s123456789012345678901234567890 = 123468;   \\\n    int t1234567890123456 = 123469;\n    }\n\n/* w.3.9:   Macro definitions: more than 1024 (including predefined ones).  */\n#include    \"m1024.h\"\n    assert( ZX == 1);\n\n    return  0;\n}\n\n"
  }
]