[
  {
    "path": ".github/workflows/ci.yml",
    "content": "name: CI\non:\n  push:\n  pull_request:\n\nconcurrency:\n  group: ci-${{ github.ref }}\n  cancel-in-progress: true\n\njobs:\n  build:\n    strategy:\n      fail-fast: false\n      matrix:\n        compiler:\n          - name: nim\n            version: devel\n          - name: nim\n            version: version-2-0\n          - name: nimskull\n            version: \"0.1.0-dev.21405\"\n          - name: nimskull\n            version: \"*\"\n\n        include:\n          - compiler:\n              name: nim\n              version: devel\n            build_doc: true\n\n    name: ${{ matrix.compiler.name }} ${{ matrix.compiler.version }}\n    runs-on: ubuntu-latest\n\n    defaults:\n      run:\n        shell: bash\n        working-directory: npeg\n\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v4.1.1\n        with:\n          path: npeg\n\n      - name: Setup Nim\n        if: matrix.compiler.name == 'nim'\n        uses: alaviss/setup-nim@0.1.1\n        with:\n          path: nim\n          version: ${{ matrix.compiler.version }}\n\n      - name: Setup nimskull\n        id: nimskull\n        if: matrix.compiler.name == 'nimskull'\n        uses: nim-works/setup-nimskull@0.1.1\n        with:\n          nimskull-version: ${{ matrix.compiler.version }}\n\n      - name: Run tests\n        run: nim r --path:src tests/tests.nim\n\n      - name: Build docs\n        if: matrix.build_doc\n        shell: bash\n        run: |\n          branch=$GITHUB_REF\n          branch=${branch##*/}\n          for i in src/npeg.nim src/npeg/*.nim; do\n            nim doc --project --outdir:htmldocs \\\n              --path:src \\\n              \"--git.url:https://github.com/$GITHUB_REPOSITORY\" \\\n              \"--git.commit:$GITHUB_SHA\" \\\n              \"--git.devel:$branch\" \\\n              \"$i\"\n          done\n          # Make npeg module the default page\n          cp htmldocs/{npeg,index}.html\n\n      - name: Upload GitHub Pages artifact\n    
    if: matrix.build_doc\n        uses: actions/upload-pages-artifact@v3.0.1\n        with:\n          path: npeg/htmldocs\n\n  deploy:\n    needs:\n      - build\n    if: github.ref == 'refs/heads/master'\n\n    permissions:\n      actions: read\n      pages: write\n      id-token: write\n\n    environment:\n      name: github-pages\n      url: ${{ steps.deployment.outputs.page_url }}\n\n    name: Deploy docs to GitHub Pages\n    runs-on: ubuntu-latest\n    steps:\n      - name: Deploy page\n        id: deployment\n        uses: actions/deploy-pages@v4.0.4\n\n  passed:\n    needs: build\n    if: failure() || cancelled()\n    name: All tests passed\n\n    runs-on: ubuntu-latest\n    steps:\n      - run: exit 1\n"
  },
  {
    "path": ".github/workflows/contents.yml",
    "content": "name: Make table of contents\non: \n  push:\n    paths:\n      - README.md\n    branches:\n      - '**' \njobs:\n  make:\n    runs-on: ubuntu-latest\n    steps:\n        - uses: actions/checkout@v3\n        - uses: thatrandomperson5/AutoMarkdownContents@v1.1.1\n          with:\n            file: README.md\n            auto: true\n            skip-first: true\n        - name: Pull request\n          uses: peter-evans/create-pull-request@v4\n          with:\n            token: ${{ secrets.GITHUB_TOKEN }}\n            title: \"Add md table of contents\"\n            commit-message: \":clipboard: Added markdown table of contents\"\n            body: |\n              :clipboard: Added markdown table of contents\n            base: ${{ github.head_ref }} # Creates pull request onto pull request or commit branch\n            branch: actions/automd  \n\n"
  },
  {
    "path": "Changelog.md",
    "content": "\n1.3.0 - 2024-08-22\n==================\n\n- Added CI (thanks Leorize)\n- Performance optimization\n- Some small rafactorings\n\n1.2.1 - 2023-03-04\n==================\n\n- fixes for --styleCheck=usages\n\n1.2.0 - 2023-01-17\n==================\n\n- Split NPegException into more specific errors, updated documentation\n\n1.1.2 - 2023-01-08\n==================\n\n- Fixed compat with Nim 1.0.11\n\n1.1.1 - 2023-01-08\n==================\n\n- Disabled test with '∙' to avoid breaking older Nim\n\n1.1.0 - 2023-01-08\n==================\n\n- Added alternate `∙` concatenation operator\n- Fixed fixBareExceptionWarning in Nim devel\n- Added table of contents to README.md\n\n1.0.1 - 2022-12-10\n==================\n\n- Bugfix release, fixes \"expression 'discard' has no type (or is ambiguous)\" in \n  rare cases\n\n1.0.0 - 2022-11-27\n==================\n\n- Improved stack trace handling\n- Fixed matchFile() for empty files\n\n0.27.0 - 2022-11-06\n===================\n\n- Augment the Nim stack trace with the NPeg return stack on exceptions\n- Documentation updates\n\n0.26.0 - 2021-11-27\n===================\n\n- Improved lineinfo in code blocks for better backtraces\n- Some documentation improvements\n\n0.25.0 - 2021-09-11\n===================\n\n- Omit the `.computedGoto.` in the inner parser loop for grammars with more\n  then 10k instructions to work around the nim compiler limitation\n\n0.24.1 - 2021-01-16\n===================\n\n- Added mixin for 'repr' to allow clean tracing of user types\n\n0.24.0 - 2020-11-20\n===================\n\n- Added -d:npegGcsafe\n\n0.23.2 - 2020-11-06\n===================\n\n- Small improvement in npeg systax checking\n\n0.23.0 - 2020-09-23\n===================\n\n- Reinstated [] out of bound check for capturest\n- Dropped profiler support, the implementation was bad\n- Small documentation improvements\n- Added RFC3339 date parser to libs\n\n0.22.2 - 2019-12-27\n===================\n\n- Skip --gc:arc tests for nim <1.1 to fix 
Nim CI builds.\n\n0.22.1 - 2019-12-27\n===================\n\n- Bugfix in codegen causing problems with ^1 notation in code blocks.\n\n0.22.0 - 2019-12-24\n===================\n\n- Changed the parsing subject from `openArray[char]` to `openArray[T]` and\n  added a 'literal' atom to the grammar. This allows NPeg to parse lists of\n  any type, making it suitable for separate lexer and parser stages. See\n  tests/lexparse.nim for a concise example.\n\n- Added `@` syntactic sugar to access the match offset inside code block\n  captures.\n\n- Dropped Json and AST captures - no complains heard since deprecation, and it\n  simplifies the code base to aid the development new features.\n\n0.21.3 - 2019-12-06\n===================\n\n- Fixed off-by-one error in range `P[m..n]` operator, which would also match\n  `P` times `n+1`\n\n- Various documentation improvements\n\n0.21.2 - 2019-11-26\n===================\n\n- Fixed the way dollar captures are rewritten to avoid the name space clash\n  which was introduced by Nim PR #12712.\n\n0.21.1 - 2019-11-19\n===================\n\n- Bugfix for templates generating ordered choices\n\n0.21.0 - 2019-10-28\n===================\n\n- anonymous `patt` patterns now also take a code block\n\n- deprecated AST and Json captures. AST captures are not flexible enough, and\n  the functionality can be better implemented using code block captures and\n  domain-specific AST object types. The Json captures were added in the early\n  days of NPeg as a flexible way to store captures, but this does not mix well\n  with custom captures and can not handle things like string unescaping. Both\n  capture types were removed from the documentation and a .deprecated. pragma\n  was added to the implementation. 
If you use Json or AST captures and think\n  deprecation is a mistake, let me know.\n\n0.20.0 - 2019-10-18\n===================\n\n- Added precedence operators - this allows constructions of Pratt parsers with\n  bounded left recursion and operator precedence.\n- Added run time profiler, enable with -d:npegProfile\n- Performance improvements\n\n0.19.0 - 2019-10-11\n===================\n\n- Significant performance improvements\n- Changed semantincs of code block captures: $0 now always captures the\n  total subject captured in a rule. This is a minor API change that only\n  affects code using the `capture[]` notation inside code blocks\n- Added fail() function to force a parser fail in a code block capture\n- Added push() function to allow code block captures to push captures\n  back on the stack\n- Check for loops caused by repeat of empty strings at compile time\n\n0.18.0 - 2019-09-26\n===================\n\n- Runtime performance improvements\n\n0.17.1 - 2019-09-19\n===================\n\n- Bugfix release (removed lingering debug echo)\n\n0.17.0 - 2019-09-17\n===================\n\n- Various runtime and compiletime performance improvements\n\n0.16.0 - 2019-09-08\n===================\n\n- Templates can now also be used in libraries\n- Added railroad diagram generation with -d:npegGraph\n- Improved error reporting\n\n0.15.0 - 2019-08-31\n===================\n\n- Generic parser API changed: the peg() macro now explicity passes the\n  userdata type and identifier.\n\n0.14.1 - 2019-08-28\n===================\n\n- Added templates / parameterised rules\n- Added custom match validation in code block capture\n- Added basic types, utf8 and uri libs\n- Added global pattern library support\n- Proc matchFile() now uses memfiles/mmap for zero copy parsers\n- Implemented method to pass user variable to code block captures\n- Added AST capture type for building simple abstract syntax trees\n- Added Jb() capture for Json booleans\n\n0.13.0 - 2019-07-21\n===================\n\n- 
The capture[] variable available inside code block matches now allows access\n  to the match offset as well. This is an API change since the type of capture\n  changed from seq[string] to seq[Capture].\n\n0.12.0 - 2019-07-14\n===================\n\n- Documentation updates\n- Made some error bounds compile-time configurable\n- Fix for more strict Nim compiler checks\n\n0.11.0 - 2019-05-29\n===================\n\n- Added support for named backreferences\n- Added safeguards to prevent grammars growing out of bounds\n- Added Graphviz .dot debugging output for parser debugging\n- Added `matchLen` and `matchMax` fields to `NPegException`\n- Improved pattern syntax error messages\n\n0.10.0 - 2019-04-24\n===================\n\n- Fixed 'Graph' character class\n\n0.9.0 - 2019-03-31\n==================\n\n- Some syntax changes to fix compilation with mainline Nim 0.19.4\n\n0.8.0 - 2019-03-30\n==================\n\n- Added syntactic sugar for accessing the captures[] seq in capture\n  code blocks with dollar-number variables $1..$9\n\n0.7.0 - 2019-03-29\n==================\n\n- Action callbacks (%) dropped in favour of Nim code block callbacks.\n\n0.6.0 - 2019-03-27\n==================\n\n- API change: count syntax changed from {n} to [n].\n\n- Optimizations in code generation\n\n0.5.0 - 2019-03-27\n==================\n\n- API change: peg() and patt() now return an object of type Parser\n  instead of a proc, and the function match(p: Parser) is now used for\n  matching the subject. match() can match string and cstring types, \n  matchFile() matches a file using memFile.\n\n- Added builtin atoms Upper, Lower, Digit, HexDigit, Alpha\n\n- Added `@` search operator\n\n- Added `&` and predicate\n\n0.4.0 - 2019-03-24\n==================\n\n- Improved tracing output, during trace the originating rule name\n  for each instruction is dumped.\n\n- Optimizations\n"
  },
  {
    "path": "INTERNALS.md",
    "content": "\n## Introduction\n\nThis document briefly describes the inner workings of NPeg.\n\nThe main PEG algorithm is based on the Paper \"A Text Pattern-Matching Tool\nbased on Parsing Expression Grammars\" by Roberto Ierusalimschy, who is also the\nauthor or LPEG. While LPEG uses a VM approach for parsing, NPeg adds an\nadditional step where the VM code is compiled to native Nim code which does the\nparsing.\n\nThis is how NPeg works in short:\n\n- The grammar is parsed by a Nim macro which recursively transforms this into\n  a sequence of VM instructions for each grammar rule.\n\n- The set of instructions is 'linked' into a complete program of instructions\n\n- The linked program is translated/compiled into a state machine, implemented\n  as a large Nim `case` statement that performs the parsing of the subject\n  string.\n\n\n## Data structures\n\nThe following data structures are used for compiling the grammar:\n\n- `Inst`, short for \"instruction\": This is a object variant which implements a\n  basic VM instruction. It consists of the opcode and a number of data fields.\n\n- `Patt`, short for \"pattern\": A pattern is a sequence of instructions\n  `seq[Inst]` which typically match an atom from the grammar.\n\n- `Rule`: One complete, named pattern which is part of a grammar.\n\n- `Grammar`: A grammar is collection of named patterns implemented as a\n  `table[string, Patt]`. This is used as the intermediate representation of the\n  complete compiled grammar and holds patterns for each of the named rules.\n\n- `Program`: A complete linked program, consisting of a pattern and its debug\n  info (symbol table, textual listing)\n\n- `Parser`: object holding the compiled Nim matching function\n\nFor captures the following data structures are relevant:\n\n- `CapFrame`: A capframe is a frame of a specific type on the capture stack\n  that points to an offset in the subject string. 
For each capture open and\n  close pair a frame exists on the stack, thus allowing for nested captures.\n\n- `Capture`: A capture is a completed capture that is collected and finalized\n  when a capture is closed and finished. \n\nFor the generic procs and types, the following convention is used:\n\n- `[T]` is the type of optional \"user data\" the gets passed into the parser.\n  When this is not explicitly given with the `peg` macro, NPeg will stub this\n  with an unused bool\n\n- `[S]` is the type of the subject. This is typicall a string, although NPeg\n  is generic enough and can parse any `seq[S]`\n\n## Building a grammar\n\nThe first step in building a parser is the translation of the grammar into\nsnippets of VM instructions which match the data and perform flow control. For\ndetails of these instructions, refer to the paper by Ierusalimschy.\n\nThe `Patt` data type is used to store a sequence of instructions. This section\ndescribe how a pattern is built from Nim code, all of which lives in `patt.nim`\n- this mechanism is later used by the macro which is parsing the actual PEG\ngrammar.\n\nThe basic atoms are constructed by the `newPatt()` procedures. These take an\nargument describing what needs to be matched in the subject, and deliver a\nshort sequence of instructions. For example, the `newPatt(\"foo\")` procedure\nwill create a pattern consisting of a single instruction: \n\n```\n   1: line           opStr \"foo\"\n```\n\nThere are a number of operators defined which act on one or more patterns.\nThese operators are used to combine multiple patterns into larger patters.\n\nFor example, the `|` operator is used for the PEG ordered choice. 
This takes\ntwo patters, and results in a pattern that tries to match the first one and\nthen skips the second, or tries to match the second if the first fails:\n\n```\n   0: line           opChoice 3\n   1: line           opStr \"foo\"\n   2: line           opCommit 4\n   3: line           opStr \"bar\"\n   4:                opReturn\n```\n\nA number of patterns can be combined into a grammar, which is simply a table\nof patterns indexed by name.\n\n\n## PEG DSL to grammar\n\nThe user defines their NPeg grammar in a Nim code block, which consists of a\nnumber of named patterns. The whole grammar is handled by the `parseGrammar()`\nwhich iterates all individual named patterns. Each pattern is passed to the\n`parsePatt()` macro, which transforms the Nim code block AST into a NPeg\ngrammar. This macro recursively goes through the Nim AST and calls `newPatt()`\nfor building atoms, and calls the various operators acting on patterns to grow\nthe grammar.\n\n\n## Grammar to Nim code\n\nThe `genCode()` procedure is used to convert the list of instructions into Nim\ncode which implements the actual parser. 
This procedure builds a `case`\nstatement for each VM instruction, and inserts a template for each opcode for\neach case.\n\n\n## Example\n\nThe following grammar is specified by the user:\n\n```\n    lines <- *line                                                          \n    line <- \"foo\" | \"bar\"\n```\n\nThis is translated into the following VM program:\n\n```\nlines:\n   0: lines          opChoice 3\n   1: lines          opCall 4 line\n   2: lines          opPartCommit 1\n   3:                opReturn\n\nline:\n   4: line           opChoice 7\n   5: line           opStr \"foo\"\n   6: line           opCommit 8\n   7: line           opStr \"bar\"\n   8:                opReturn\n```\n\nwhich is then translated into the following `case` statement:\n\n```\n  while true:\n    case ip\n    of 0:\n      opChoiceFn(3, \"lines\")\n    of 1:\n      opCallFn(\"line\", 3, \"lines\")\n    of 2:\n      opPartCommitFn(1, \"lines\")\n    of 3:\n      opReturnFn(\"\")\n    of 4:\n      opChoiceFn(7, \"line\")\n    of 5:\n      opStrFn(\"foo\", \"line\")\n    of 6:\n      opCommitFn(8, \"line\")\n    of 7:\n      opStrFn(\"bar\", \"line\")\n    of 8:\n      opReturnFn(\"\")\n    else:\n      opFailFn()\n```\n"
  },
  {
    "path": "LICENSE",
    "content": "Copyright 2019 Ico Doornekamp <npeg@zevv.nl>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n"
  },
  {
    "path": "README.md",
    "content": "[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)\n![Stability: experimental](https://img.shields.io/badge/stability-stable-green.svg)\n\n<img src=\"https://raw.githubusercontent.com/zevv/npeg/master/doc/npeg.png\" alt=\"NPeg logo\" align=\"left\">\n\n> \"_Because friends don't let friends write parsers by hand_\"\n\nNPeg is a pure Nim pattern matching library. It provides macros to compile\npatterns and grammars (PEGs) to Nim procedures which will parse a string and\ncollect selected parts of the input. PEGs are not unlike regular expressions,\nbut offer more power and flexibility, and have less ambiguities. (More about \nPEGs on [Wikipedia](https://en.wikipedia.org/wiki/Parsing_expression_grammar))\n\n![Graph](/doc/syntax-diagram.png)\n\nSome use cases where NPeg is useful are configuration or data file parsers,\nrobust protocol implementations, input validation, lexing of programming\nlanguages or domain specific languages.\n\nSome NPeg highlights:\n\n- Grammar definitions and Nim code can be freely mixed. 
Nim code is embedded\n  using the normal Nim code block syntax, and does not disrupt the grammar\n  definition.\n\n- NPeg-generated parsers can be used both at run and at compile time.\n\n- NPeg offers various methods for tracing, optimizing and debugging\n  your parsers.\n\n- NPeg can parse sequences of any data types, also making it suitable as a\n  stage-two parser for lexed tokens.\n\n- NPeg can draw [cool diagrams](/doc/example-railroad.png)\n\n## Contents\n\n<!-- AutoContentStart -->\n- [Quickstart](#quickstart)\n- [Usage](#usage)\n    * [Simple patterns](#simple-patterns)\n    * [Grammars](#grammars)\n- [Syntax](#syntax)\n    * [Atoms](#atoms)\n    * [Operators](#operators)\n- [Precedence operators](#precedence-operators)\n- [Captures](#captures)\n    * [String captures](#string-captures)\n    * [Code block captures](#code-block-captures)\n        - [Custom match validations](#custom-match-validations)\n        - [Passing state](#passing-state)\n    * [Backreferences](#backreferences)\n- [More about grammars](#more-about-grammars)\n    * [Ordering of rules in a grammar](#ordering-of-rules-in-a-grammar)\n    * [Templates, or parameterized rules](#templates-or-parameterized-rules)\n    * [Composing grammars with libraries](#composing-grammars-with-libraries)\n    * [Library rule overriding/shadowing](#library-rule-overridingshadowing)\n- [Error handling](#error-handling)\n    * [MatchResult](#matchresult)\n    * [NpegParseError exceptions](#npegparseerror-exceptions)\n    * [Other exceptions](#other-exceptions)\n    * [Parser stack trace](#parser-stack-trace)\n- [Advanced topics](#advanced-topics)\n    * [Parsing other types then strings](#parsing-other-types-then-strings)\n- [Some notes on using PEGs](#some-notes-on-using-pegs)\n    * [Anchoring and searching](#anchoring-and-searching)\n    * [Complexity and performance](#complexity-and-performance)\n    * [End of string](#end-of-string)\n    * [Non-consuming atoms and 
captures](#non-consuming-atoms-and-captures)\n    * [Left recursion](#left-recursion)\n    * [UTF-8 / Unicode](#utf-8--unicode)\n- [Tracing and debugging](#tracing-and-debugging)\n    * [Syntax diagrams](#syntax-diagrams)\n    * [Grammar graphs](#grammar-graphs)\n    * [Tracing](#tracing)\n- [Compile-time configuration](#compile-time-configuration)\n- [Tracing and debugging](#tracing-and-debugging-1)\n- [Random stuff and frequently asked questions](#random-stuff-and-frequently-asked-questions)\n    * [Why does NPeg not support regular PEG syntax?](#why-does-npeg-not-support-regular-peg-syntax)\n    * [Can NPeg be used to parse EBNF grammars?](#can-npeg-be-used-to-parse-ebnf-grammars)\n    * [NPeg and generic functions](#npeg-and-generic-functions)\n- [Examples](#examples)\n    * [Parsing arithmetic expressions](#parsing-arithmetic-expressions)\n    * [A complete JSON parser](#a-complete-json-parser)\n    * [Captures](#captures-1)\n    * [More examples](#more-examples)\n- [Future directions / Todos / Roadmap / The long run](#future-directions--todos--roadmap--the-long-run)\n\n<!-- AutoContentEnd -->\n\n## Quickstart\n\nHere is a simple example showing the power of NPeg: The macro `peg` compiles a\ngrammar definition into a `parser` object, which is used to match a string and\nplace the key-value pairs into the Nim table `words`:\n\n```nim\nimport npeg, strutils, tables\n\ntype Dict = Table[string, int]\n\nlet parser = peg(\"pairs\", d: Dict):\n  pairs <- pair * *(',' * pair) * !1\n  word <- +Alpha\n  number <- +Digit\n  pair <- >word * '=' * >number:\n    d[$1] = parseInt($2)\n\nvar words: Dict\ndoAssert parser.match(\"one=1,two=2,three=3,four=4\", words).ok\necho words\n```\n\nOutput:\n\n```nim\n{\"two\": 2, \"three\": 3, \"one\": 1, \"four\": 4}\n```\n\nA brief explanation of the above code:\n\n* The macro `peg` is used to create a parser object, which uses `pairs` as the\n  initial grammar rule to match. 
The variable `d` of type `Dict` will be available\n  inside the code block parser for storing the parsed data.\n\n* The rule `pairs` matches one `pair`, followed by zero or more times (`*`) a\n  comma followed by a `pair`.\n\n* The rules `word` and `number` match a sequence of one or more (`+`)\n  alphabetic characters or digits, respectively. The `Alpha` and `Digit` rules\n  are pre-defined rules matching the character classes `{'A'..'Z','a'..'z'}` \n  and `{'0'..'9'}`.\n\n* The rule `pair` matches a `word`, followed by an equals sign (`=`), followed\n  by a `number`.\n\n* The `word` and `number` in the `pair` rule are captured with the `>`\n  operator. The Nim code fragment below this rule is executed for every match,\n  and stores the captured word and number in the `words` Nim table.\n\n\n## Usage\n\nThe `patt()` and `peg()` macros can be used to compile parser functions:\n\n- `patt()` creates a parser from a single anonymous pattern.\n\n- `peg()` allows the definition of a set of (potentially recursive) rules \n          making up a complete grammar.\n\nThe result of these macros is an object of the type `Parser` which can be used\nto parse a subject:\n\n```nim\nproc match(p: Parser, s: string) = MatchResult\nproc matchFile(p: Parser, fname: string) = MatchResult\n```\n\nThe above `match` functions returns an object of the type `MatchResult`:\n\n```nim\nMatchResult = object\n  ok: bool\n  matchLen: int\n  matchMax: int\n  ...\n```\n\n* `ok`: A boolean indicating if the matching succeeded without error. Note that\n  a successful match does not imply that *all of the subject* was matched,\n  unless the pattern explicitly matches the end-of-string.\n\n* `matchLen`: The number of input bytes of the subject that successfully\n  matched.\n\n* `matchMax`: The highest index into the subject that was reached during\n  parsing, *even if matching was backtracked or did not succeed*. 
This offset\n  is usually a good indication of the location where the matching error\n  occurred.\n\nThe string captures made during the parsing can be accessed with:\n\n```nim\nproc captures(m: MatchResult): seq[string]\n```\n\n\n### Simple patterns\n\nA simple pattern can be compiled with the `patt` macro.\n\nFor example, the pattern below splits a string by white space:\n\n```nim\nlet parser = patt *(*' ' * > +(1-' '))\necho parser.match(\"   one two three \").captures\n```\n\nOutput:\n\n```\n@[\"one\", \"two\", \"three\"]\n```\n\nThe `patt` macro can take an optional code block which is used as code block\ncapture for the pattern:\n\n```nim\nvar key, val: string\nlet p = patt >+Digit * \"=\" * >+Alpha:\n  (key, val) = ($1, $2)\n\nassert p.match(\"15=fifteen\").ok\necho key, \" = \", val\n```\n\n### Grammars\n\nThe `peg` macro provides a method to define (recursive) grammars. The first\nargument is the name of initial patterns, followed by a list of named patterns.\nPatterns can now refer to other patterns by name, allowing for recursion:\n\n```nim\nlet parser = peg \"ident\":\n  lower <- {'a'..'z'}\n  ident <- *lower\ndoAssert parser.match(\"lowercaseword\").ok\n```\n\nThe order in which the grammar patterns are defined affects the generated\nparser.\nAlthough NPeg could always reorder, this is a design choice to give the user\nmore control over the generated parser:\n\n* when a pattern `P1` refers to pattern `P2` which is defined *before* `P1`,\n  `P2` will be inlined in `P1`.  This increases the generated code size, but\n  generally improves performance.\n\n* when a pattern `P1` refers to pattern `P2` which is defined *after* `P1`,\n  `P2` will be generated as a subroutine which gets called from `P1`. 
This will\n  reduce code size, but might also result in a slower parser.\n\n\n## Syntax\n\nThe NPeg syntax is similar to normal PEG notation, but some changes were made\nto allow the grammar to be properly parsed by the Nim compiler:\n\n- NPeg uses prefixes instead of suffixes for `*`, `+`, `-` and `?`.\n- Ordered choice uses `|` instead of `/` because of operator precedence.\n- The explicit `*` infix operator is used for sequences.\n\nNPeg patterns and grammars can be composed from the following parts:\n\n```nim\n\nAtoms:\n\n   0              # matches always and consumes nothing\n   1              # matches any character\n   n              # matches exactly n characters\n  'x'             # matches literal character 'x'\n  \"xyz\"           # matches literal string \"xyz\"\n i\"xyz\"           # matches literal string, case insensitive\n  {'x'..'y'}      # matches any character in the range from 'x'..'y'\n  {'x','y','z'}   # matches any character from the set\n\nOperators:\n\n   P1 * P2        # concatenation\n   P1 | P2        # ordered choice\n   P1 - P2        # matches P1 if P2 does not match\n  (P)             # grouping\n  !P              # matches everything but P\n  &P              # matches P without consuming input\n  ?P              # matches P zero or one times\n  *P              # matches P zero or more times\n  +P              # matches P one or more times\n  @P              # search for P\n   P[n]           # matches P n times\n   P[m..n]        # matches P m to n times\n\nPrecedence operators:\n\n  P ^ N           # P is left associative with precedence N\n  P ^^ N          # P is right associative with precedence N\n\nString captures:  \n\n  >P              # Captures the string matching  P \n\nBack references:\n\n  R(\"tag\", P)     # Create a named reference for pattern P\n  R(\"tag\")        # Matches the given named reference\n\nError handling:\n\n  E\"msg\"          # Raise an `NPegParseError` exception\n```\n\nIn addition to the above, NPeg 
provides the following built-in shortcuts for\ncommon atoms, corresponding to POSIX character classes:\n\n```nim\n  Alnum  <- {'A'..'Z','a'..'z','0'..'9'}, # Alphanumeric characters\n  Alpha  <- {'A'..'Z','a'..'z'},          # Alphabetic characters\n  Blank  <- {' ','\\t'},                   # Space and tab\n  Cntrl  <- {'\\x00'..'\\x1f','\\x7f'},      # Control characters\n  Digit  <- {'0'..'9'},                   # Digits\n  Graph  <- {'\\x21'..'\\x7e'},             # Visible characters\n  Lower  <- {'a'..'z'},                   # Lowercase characters\n  Print  <- {'\\x21'..'\\x7e',' '},         # Visible characters and spaces\n  Space  <- {'\\9'..'\\13',' '},            # Whitespace characters\n  Upper  <- {'A'..'Z'},                   # Uppercase characters\n  Xdigit <- {'A'..'F','a'..'f','0'..'9'}, # Hexadecimal digits\n```\n\n\n### Atoms\n\nAtoms are the basic building blocks for a grammar, describing the parts of the\nsubject that should be matched.\n\n- Integer literal: `0` / `1` / `n`\n\n  The int literal atom `n` matches exactly n number of bytes. `0` always\n  matches, but does not consume any data.\n\n\n- Character and string literals: `'x'` / `\"xyz\"` / `i\"xyz\"`\n\n  Characters and strings are literally matched. If a string is prefixed with\n  `i`, it will be matched case insensitive.\n\n\n- Character sets: `{'x','y'}`\n\n  Characters set notation is similar to native Nim. A set consists of zero or\n  more comma separated characters or character ranges.\n\n  ```nim\n   {'x'..'y'}    # matches any character in the range from 'x'..'y'\n   {'x','y','z'} # matches any character from the set 'x', 'y', and 'z'\n  ```\n\n  The set syntax `{}` is flexible and can take multiple ranges and characters\n  in one expression, for example `{'0'..'9','a'..'f','A'..'F'}`.\n\n\n### Operators\n\nNPeg provides various prefix and infix operators. 
These operators combine or\ntransform one or more patterns into expressions, building larger patterns.\n\n- Concatenation: `P1 * P2`\n\n  ```\n  o──[P1]───[P2]──o\n  ```\n\n  The pattern `P1 * P2` returns a new pattern that matches only if first `P1`\n  matches, followed by `P2`.\n\n  For example, `\"foo\" * \"bar\"` would only match the string `\"foobar\"`.\n\n  Note: As an alternative for the `*` asterisk, the unicode glyph `∙` (\"bullet\n  operator\", 0x2219) can also be used for concatenation.\n\n\n- Ordered choice: `P1 | P2`\n\n  ```\n  o─┬─[P1]─┬─o\n    ╰─[P2]─╯\n  ```\n\n  The pattern `P1 | P2` tries to first match pattern `P1`. If this succeeds,\n  matching will proceed without trying `P2`. Only if `P1` can not be matched,\n  NPeg will backtrack and try to match `P2` instead. Once either `P1` or `P2` has\n  matched, the choice will be final (\"commited\"), and no more backtracking will\n  be possible for this choice.\n\n  For example `(\"foo\" | \"bar\") * \"fizz\"` would match both `\"foofizz\"` and\n  `\"barfizz\"`.\n\n  NPeg optimizes the `|` operator for characters and character sets: The\n  pattern `'a' | 'b' | 'c'` will be rewritten to a character set\n  `{'a','b','c'}`.\n\n\n- Difference: `P1 - P2`\n\n  The pattern `P1 - P2` matches `P1` *only* if `P2` does not match. 
This is\n  equivalent to `!P2 * P1`:\n  \n  ```\n     ━━━━\n  o──[P2]─»─[P1]──o\n  ```\n\n  NPeg optimizes the `-` operator for characters and character sets: The\n  pattern `{'a','b','c'} - 'b'` will be rewritten to the character set\n  `{'a','c'}`.\n\n\n- Grouping: `(P)`\n\n  Brackets are used to group patterns similar to normal arithmetic expressions.\n\n\n- Not-predicate: `!P`\n\n  ```\n     ━━━\n  o──[P]──o\n  ```\n\n  The pattern `!P` returns a pattern that matches only if the input does not\n  match `P`.\n  In contrast to most other patterns, this pattern does not consume any input.\n\n  A common usage for this operator is the pattern `!1`, meaning \"only succeed\n  if there is not a single character left to match\" - which is only true for\n  the end of the string.\n\n\n- And-predicate: `&P`\n\n  ```\n     ━━━\n     ━━━\n  o──[P]──o\n  ```\n\n  The pattern `&P` matches only if the input matches `P`, but will *not*\n  consume any input. This is equivalent to `!!P`. This is denoted by a double\n  negation in the railroad diagram, which is not very pretty unfortunately.\n\n- Optional: `?P`\n\n  ```\n    ╭──»──╮\n  o─┴─[P]─┴─o\n  ```\n\n  The pattern `?P` matches if `P` can be matched zero or more times, so\n  essentially succeeds if `P` either matches or not.\n\n  For example, `?\"foo\" * bar\"` matches both `\"foobar\"` and `\"bar\"`.\n\n\n- Match zero or more times: `*P`\n\n  ```\n    ╭───»───╮\n  o─┴┬─[P]─┬┴─o\n     ╰──«──╯\n  ```\n\n  The pattern `*P` tries to match as many occurrences of pattern `P` as\n  possible - this operator always behaves *greedily*.\n\n  For example, `*\"foo\" * \"bar\"` matches `\"bar\"`, `\"fooboar\"`, `\"foofoobar\"`,\n  etc.\n\n\n- Match one or more times: `+P`\n\n  ```\n  o─┬─[P]─┬─o\n    ╰──«──╯\n  ```\n\n  The pattern `+P` matches `P` at least once, but also more times.\n  It is equivalent to the `P * *P` - this operator always behave *greedily*.\n\n\n- Search: `@P`\n\n  This operator searches for pattern `P` using an 
optimized implementation. It\n  is equivalent to `s <- *(1 - P) * P`, which can be read as \"try to match as\n  many characters as possible not matching `P`, and then match `P`:\n\n  ```\n    ╭─────»─────╮\n    │  ━━━      │\n  o─┴┬─[P]─»─1─┬┴»─[P]──o\n     ╰────«────╯\n  ```\n\n  Note that this operator does not allow capturing the skipped data up to the\n  match; if this is required you can manually construct a grammar to do this.\n\n\n- Match exactly `n` times: `P[n]`\n\n  The pattern `P[n]` matches `P` exactly `n` times.\n\n  For example, `\"foo\"[3]` only matches the string `\"foofoofoo\"`:\n\n  ```\n  o──[P]─»─[P]─»─[P]──o\n  ```\n\n\n- Match `m` to `n` times: `P[m..n]`\n\n  The pattern `P[m..n]` matches `P` at least `m` and at most `n` times.\n\n  For example, `\"foo[1,3]\"` matches `\"foo\"`, `\"foofoo\"` and `\"foofoofo\"`:\n\n  ```\n          ╭──»──╮ ╭──»──╮\n  o──[P]─»┴─[P]─┴»┴─[P]─┴─o\n  ```\n\n\n## Precedence operators\n\nNote: This is an experimental feature, the implementation or API might change\nin the future.\n\nPrecedence operators allows for the construction of \"precedence climbing\" or\n\"Pratt parsers\" with NPeg. The main use for this feature is building parsers\nfor programming languages that follow the usual precedence and associativity\nrules of arithmetic expressions.\n\n- Left associative precedence of `N`: `P ^ N`\n\n```\n   <1<   \no──[P]──o\n```\n\n- Right associative precedence of `N`: `P ^^ N`\n\n```\n   >1> \no──[P]──o\n```\n\nDuring parsing NPeg keeps track of the current precedence level of the parsed\nexpression - the default is `0` if no precedence has been assigned yet. 
When\nthe `^` operator is matched, either one of the next three cases applies:\n\n- `P ^ N` where `N > 0` and `N` is lower then the current precedence: in this\n  case the current precedence is set to `N` and parsing of pattern `P`\n  continues.\n\n- `P ^ N` where `N > 0` and `N` is higher or equal then the current precedence:\n  parsing will fail and backtrack.\n\n- `P ^ 0`: resets the current precedence to 0 and continues parsing. This main\n  use case for this is parsing sub-expressions in parentheses.\n\nThe heart of a Pratt parser in NPeg would look something like this:\n\n```nim\nexp <- prefix * *infix\n\nparenExp <- ( \"(\" * exp * \")\" ) ^ 0\n\nprefix <- number | parenExp\n\ninfix <- {'+','-'}    * exp ^  1 |\n         {'*','/'}    * exp ^  2 |\n         {'^'}        * exp ^^ 3:\n```\n\nMore extensive documentation will be added later, for now take a look at the\nexample in `tests/precedence.nim`.\n\n\n## Captures\n\n```\n     ╭╶╶╶╶╶╮\ns o────[P]────o\n     ╰╶╶╶╶╶╯\n```\n\nNPeg supports a number of ways to capture data when parsing a string.\nThe various capture methods are described here, including a concise example.\n\nThe capture examples below build on the following small PEG, which parses\na comma separated list of key-value pairs:\n\n```nim\nconst data = \"one=1,two=2,three=3,four=4\"\n\nlet parser = peg \"pairs\":\n  pairs <- pair * *(',' * pair) * !1\n  word <- +Alpha\n  number <- +Digit\n  pair <- word * '=' * number\n\nlet r = parser.match(data)\n```\n\n### String captures\n\nThe basic method for capturing is marking parts of the peg with the capture\nprefix `>`. During parsing NPeg keeps track of all matches, properly discarding\nany matches which were invalidated by backtracking. 
Only when parsing has fully\nsucceeded it creates a `seq[string]` of all matched parts, which is then\nreturned in the `MatchData.captures` field.\n\nIn the example, the `>` capture prefix is added to the `word` and `number`\nrules, causing the matched words and numbers to be appended to the result\ncapture `seq[string]`:\n\n```nim\nlet parser = peg \"pairs\":\n  pairs <- pair * *(',' * pair) * !1\n  word <- +Alpha\n  number <- +Digit\n  pair <- >word * '=' * >number\n\nlet r = parser.match(data)\n```\n\nThe resulting list of captures is now:\n\n```nim\n@[\"one\", \"1\", \"two\", \"2\", \"three\", \"3\", \"four\", \"4\"]\n```\n\n\n### Code block captures\n\nCode block captures offer the most flexibility for accessing matched data in\nNPeg. This allows you to define a grammar with embedded Nim code for handling\nthe data during parsing.\n\nNote that for code block captures, the Nim code gets executed during parsing,\n*even if the match is part of a pattern that fails and is later backtracked*.\n\nWhen a grammar rule ends with a colon `:`, the next indented block in the\ngrammar is interpreted as Nim code, which gets executed when the rule has been\nmatched. Any string captures that were made inside the rule are available to\nthe Nim code in the injected variable `capture[]` of type `seq[Capture]`:\n\n```\ntype Capture = object\n  s*: string      # The captured string\n  si*: int        # The index of the captured string in the subject\n```\n\nThe total subject matched by the code block rule is available in `capture[0]`\nAny additional explicit `>` string captures made by the rule or any of its\nchild rules will be available as `capture[1]`, `capture[2]`, ...\n\nFor convenience there is syntactic sugar available in the code block capture\nblocks:\n\n- The variables `$0` to `$9` are rewritten to `capture[n].s` and can be used to\n  access the captured strings. 
The `$` operator uses the usual Nim precedence,
When the\n`fail()` or `validate()` functions are not called, the match will succeed\nimplicitly.\n\nFor example, the following rule will check if a passed number is a valid\n`uint8` number:\n\n```nim\nuint8 <- >Digit[1..3]:\n  let v = parseInt($a)\n  validate v>=0 and v<=255\n```\n\nThe following grammar will cause the whole parse to fail when the `error` rule\nmatches:\n\n```nim\nerror <- 0:\n  fail()\n```\n\nNote: The Nim code block is running within the NPeg parser context and in\ntheory could access to its internal state - this could be used to create custom\nvalidator/matcher functions that can inspect the subject string, do lookahead\nor lookback, and adjust the subject index to consume input. At the time of\nwriting, NPeg lacks a formal API or interface for this though, and I am not\nsure yet what this should look like - If you are interested in doing this,\ncontact me so we can discuss the details.\n\n#### Passing state\n\nNPeg allows passing of data of a specific type to the `match()` function, this\nvalue is then available inside code blocks as a variable. This mitigates the\nneed for global variables for storing or retrieving data in access captures.\n\nThe syntax for passing data in a grammar is:\n\n```\npeg(name, identifier: Type)\n```\n\nFor example, the above parser can be rewritten as such:\n\n```nim\ntype Dict = Table[string, int]\n\nlet parser = peg(\"pairs\", userdata: Dict):\n  pairs <- pair * *(',' * pair) * !1\n  word <- +Alpha\n  number <- +Digit\n  pair <- >word * '=' * >number:\n    userdata[$1] = parseInt($2)\n\nvar words: Dict\nlet r = parser.match(data, words)\n```\n\n\n### Backreferences\n\nBackreferences allow NPeg to match an exact string that matched earlier in the\ngrammar. 
This can be useful to match repetitions of the same word, or for\nexample to match so called here-documents in programming languages.\n\nFor this, NPeg offers the `R` operator with the following two uses:\n\n* The `R(name, P)` pattern creates a named reference for pattern `P` which can\n  be referred to by name in other places in the grammar.\n\n* The pattern `R(name)` matches the contents of the named reference that\n  earlier been stored with `R(name, P)` pattern.\n\nFor example, the following rule will match only a string which will have the \nsame character in the first and last position:\n\n```\npatt R(\"c\", 1) * *(1 - R(\"c\")) * R(\"c\") * !1\n```\n\nThe first part of the rule `R(\"c\", 1)` will match any character, and store this\nin the named reference `c`. The second part will match a sequence of zero or\nmore characters that do not match reference `c`, followed by reference `c`.\n\n\n## More about grammars\n\n\n### Ordering of rules in a grammar\n\nRepetitive inlining of rules might cause a grammar to grow too large, resulting\nin a huge executable size and slow compilation. 
NPeg tries to mitigate this in\ntwo ways:\n\n* Patterns that are too large will not be inlined, even if the above ordering\n  rules apply.\n\n* NPeg checks the size of the total grammar, and if it thinks it is too large\n  it will fail compilation with the error message `NPeg: grammar too complex`.\n\nCheck the section \"Compile-time configuration\" below for more details about too\ncomplex grammars.\n\nThe parser size and performance depends on many factors; when performance\nand/or code size matters, it pays to experiment with different orderings and\nmeasure the results.\n\nWhen in doubt, check the generated parser instructions by compiling with the\n`-d:npegTrace` or `-d:npegDotDir` flags - see the section Tracing and\nDebugging for more information.\n\nAt this time the upper limit is 4096 rules, this might become a configurable\nnumber in a future release.\n\nFor example, the following grammar will not compile because recursive inlining\nwill cause it to expand to a parser with more then 4^6 = 4096 rules:\n\n```\nlet p = peg \"z\":\n  f <- 1\n  e <- f * f * f * f\n  d <- e * e * e * e\n  c <- d * d * d * d\n  b <- c * c * c * c\n  a <- b * b * b * b\n  z <- a * a * a * a\n```\n\nThe fix is to change the order of the rules so that instead of inlining NPeg\nwill use a calling mechanism:\n\n```\nlet p = peg \"z\":\n  z <- a * a * a * a\n  a <- b * b * b * b\n  b <- c * c * c * c\n  c <- d * d * d * d\n  d <- e * e * e * e\n  e <- f * f * f * f\n  f <- 1\n```\n\nWhen in doubt check the generated parser instructions by compiling with the\n`-d:npegTrace` flag - see the section Tracing and Debugging for more\ninformation.\n\n\n### Templates, or parameterized rules\n\nWhen building more complex grammars you may find yourself duplicating certain\nconstructs in patterns over and over again. To avoid code repetition (DRY),\nNPeg provides a simple mechanism to allow the creation of parameterized rules.\nIn good Nim-fashion these rules are called \"templates\". 
Templates are defined\njust like normal rules, but have a list of arguments, which are referred to in\nthe rule. Technically, templates just perform a basic search-and-replace\noperation: every occurrence of a named argument is replaced by the exact\npattern passed to the template when called.\n\nFor example, consider the following grammar:\n\n```nim\nnumberList <- +Digit * *( ',' * +Digit)\nwordList <- +Alpha * *( ',' * +Alpha)\n```\n\nThis snippet uses a common pattern twice for matching lists: `p * *( ',' * p)`.\nThis matches pattern `p`, followed by zero or more occurrences of a comma\nfollowed by pattern `p`. For example, `numberList` will match the string\n`1,22,3`.\n\nThe above example can be parameterized with a template like this:\n\n```nim\ncommaList(item) <- item * *( ',' * item )\nnumberList <- commaList(+Digit)\nwordList <- commaList(+Alpha)\n```\n\nHere the template `commaList` is defined, and any occurrence of its argument\n'item' will be replaced with the patterns passed when calling the template.\nThis template is used to define the more complex patterns `numberList` and\n`wordList`.\n\nTemplates may invoke other templates recursively; for example the above can\neven be further generalized:\n\n```nim\nlist(item, sep) <- item * *( sep * item )\ncommaList(item) <- list(item, ',')\nnumberList <- commaList(+Digit)\nwordList <- commaList(+Alpha)\n```\n\n\n### Composing grammars with libraries\n\nFor simple grammars it is usually fine to build all patterns from scratch from\natoms and operators, but for more complex grammars it makes sense to define\nreusable patterns as basic building blocks.\n\nFor this, NPeg keeps track of a global library of patterns and templates. The\n`grammar` macro can be used to add rules or templates to this library. 
All\npatterns in the library will be stored with a *qualified* identifier in the\nform `libraryname.patternname`, by which they can be referred to at a later\ntime.\n\nFor example, the following fragment defines three rules in the library with the\nname `number`. The rules will be stored in the global library and are referred\nto in the peg by their qualified names `number.dec`, `number.hex` and\n`number.oct`:\n\n```nim\ngrammar \"number\":\n  dec <- {'1'..'9'} * *{'0'..'9'}\n  hex <- i\"0x\" * +{'0'..'9','a'..'f','A'..'F'}\n  oct <- '0' * *{'0'..'9'}\n\nlet p = peg \"line\":\n  line <- int * *(\",\" * int)\n  int <- number.dec | number.hex | number.oct\n\nlet r = p.match(\"123,0x42,0644\")\n```\n\nNPeg offers a number of pre-defined libraries for your convenience, these can\nbe found in the `npeg/lib` directory. A library an be imported with the regular\nNim `import` statement, all rules defined in the imported file will then be\nadded to NPeg's global pattern library. For example:\n\n```nim\nimport npeg/lib/uri\n```\n\n\nNote that templates defined in libraries do not implicitly bind the the rules\nfrom that grammar; instead, you need to explicitly qualify the rules used in\nthe template to refer to the grammar. For example:\n\n```nim\ngrammar \"foo\":\n  open <- \"(\"\n  close <- \")\"\n  inBrackets(body): foo.open * body * foo.close\n```\n\n### Library rule overriding/shadowing\n\nTo allow the user to add custom captures to imported grammars or rules, it is\npossible to *override* or *shadow* an existing rule in a grammar.\n\nOverriding will replace the rule from the library with the provided new rule,\nallowing the caller to change parts of an imported grammar. A overridden rule\nis allowed to reference the original rule by name, which will cause the new\nrule to *shadow* the original rule. 
This will effectively rename the original\nrule and replace it with the newly defined rule which will call the original\nreferred rule.\n\nFor example, the following snippet will reuse the grammar from the `uri`\nlibrary and capture some parts of the URI in a Nim object:\n\n```nim\nimport npeg/lib/uri\n\ntype Uri = object\n  host: string\n  scheme: string\n  path: string\n  port: int\n\nvar myUri: Uri\n\nlet parser = peg \"line\":\n  line <- uri.URI\n  uri.scheme <- >uri.scheme: myUri.scheme = $1\n  uri.host <- >uri.host:     myUri.host = $1\n  uri.port <- >uri.port:     myUri.port = parseInt($1)\n  uri.path <- >uri.path:     myUri.path = $1\n\necho parser.match(\"http://nim-lang.org:8080/one/two/three\")\necho myUri  # --> (host: \"nim-lang.org\", scheme: \"http\", path: \"/one/two/three\", port: 8080)\n```\n\n## Error handling\n\nNPeg offers a number of ways to handle errors during parsing a subject string;\nwhat method best suits your parser depends on your requirements. \n\n\n### MatchResult\n\nThe most simple way to handle errors is to inspect the `MatchResult` object\nthat is returned by the `match()` proc:\n\n```nim\nMatchResult = object\n  ok: bool\n  matchLen: int\n  matchMax: int\n```\n\nThe `ok` field in the `MatchResult` indicates if the parser was successful:\nwhen the complete pattern has been matched this value will be set to `true`,\nif the complete pattern did not match the subject the value will be `false`.\n\nIn addition to the `ok` field, the `matchMax` field indicates the maximum\noffset into the subject the parser was able to match the string. 
If the\nmatching succeeded `matchMax` equals the total length of the subject, if the\nmatching failed, the value of `matchMax` is usually a good indication of where\nin the subject string the error occurred:\n\n```\nlet a = patt 4\nlet r = a.match(\"123\")\nif not r.ok:\n  echo \"Parsing failed at position \", r.matchMax\n```\n\n### NpegParseError exceptions\n\nWhen, during matching, the parser reaches an `E\"message\"` atom in the grammar,\nNPeg will raise an `NPegParseError` exception with the given message.\nThe typical use case for this atom is to be combine with the ordered choice `|`\noperator to generate helpful error messages.\nThe following example illustrates this:\n\n```nim\nlet parser = peg \"list\":\n  list <- word * *(comma * word) * !1\n  word <- +Alpha | E\"expected word\"\n  comma <- ',' | E\"expected comma\"\n\ntry:\n  echo parser.match(\"one,two;three\")\nexcept NPegParseError as e:\n  echo \"Parsing failed at position \", e.matchMax, \": \", e.msg\n```\n\nThe rule `comma` tries to match the literal `','`. 
If this can not be matched,\nthe rule `E\"expected comma\"` will match instead, where `E` will raise an\n`NPegParseError` exception.\n\nThe `NPegParseError` type contains the same two fields as `MatchResult` to\nindicate where in the subject string the match failed: `matchLen` and\n`matchMax`, which can be used as an indication of the location of the parse\nerror:\n\n```\nParsing failed at position 7: expected comma\n```\n\n\n### Other exceptions\n\nNPeg can raise a number of other exception types during parsing:\n\n- `NPegParseError`: described in the previous section\n\n- `NPegStackOverflowError`: a stack overflow occured in the backtrace\n  or call stack; this is usually an indication of a faulty or too complex\n  grammar.\n\n- `NPegUnknownBackrefError`: An unknown back reference identifier is used in an \n  `R()` rule.\n\n- `NPegCaptureOutOfRangeError`: A code block capture tries to access a capture\n  that is not available using the `$` notation or by accessing the `capture[]`\n  seq.\n\n\nAll the above errors are inherited from the generic `NPegException` object.\n\n\n### Parser stack trace\n\nIf an exception is raised from within an NPeg parser - either by the `E` atom\nor by nim code in a code block capture - NPeg will augment the Nim stack trace\nwith frames indicating where in the grammar the exception occured.\n\nThe above example will generate the following stack trace, note the last two\nentries which are added by NPeg and show the rules in which the exception\noccured:\n\n```\n/tmp/list.nim(9)         list\n./npeg/src/npeg.nim(142) match\n./npeg/src/npeg.nim(135) match\n/tmp/flop.nim(4)         list <- word * *(comma * word) * eof\n/tmp/flop.nim(7)         word <- +{'a' .. 
'z'} | E\"expected word\"\nError: unhandled exception: Parsing error at #14: \"expected word\" [NPegParseError]\n```\n\nNote: this requires Nim 'devel' or version > 1.6.x; on older versions you can\nuse `-d:npegStackTrace` to make NPeg dump the stack to stdout.\n\n\n## Advanced topics\n\n### Parsing other types then strings\n\nNote: This is an experimental feature, the implementation or API might change\nin the future.\n\nNPeg was originally designed to parse strings like a regular PEG engine, but\nhas since evolved into a generic parser that can parse any subject of type\n`openArray[T]`. This section describes how to use this feature.\n\n- The `peg()` macro must be passed an additional argument specifying the base\n  type `T` of the subject; the generated parser will then parse a subject of\n  type `openArray[T]`. When not given, the default type is `char`, and the parser\n  parsers `openArray[char]`, or more typically, `string`.\n\n- When matching non-strings, some of the usual atoms like strings or character\n  sets do not make sense in a grammar, instead the grammar uses literal atoms.\n  Literals can be specified in square brackets and are interpreted as any Nim\n  code: `[foo]`, `[1+1]` or `[\"foo\"]` are all valid literals.\n\n- When matching non-strings, captures will be limited to only a single element\n  of the base type, as this makes more sense when parsing a token stream.\n\nFor an example of this feature check the example in `tests/lexparse.nim` - this\nimplements a classic parser with separate lexing and parsing stages.\n\n\n## Some notes on using PEGs\n\n\n### Anchoring and searching\n\nUnlike regular expressions, PEGs are always matched in *anchored* mode only:\nthe defined pattern is matched from the start of the subject string.\nFor example, the pattern `\"bar\"` does not match the string `\"foobar\"`.\n\nTo search for a pattern in a stream, a construct like this can be used:\n\n```nim\np <- \"bar\"\nsearch <- p | 1 * search\n```\n\nThe above 
grammar first tries to match pattern `p`, or if that fails, matches\nany character `1` and recurs back to itself. Because searching is a common\noperation, NPeg provides the builtin `@P` operator for this.\n\n\n### Complexity and performance\n\nAlthough it is possible to write patterns with exponential time complexity for\nNPeg, they are much less common than in regular expressions, thanks to the\nlimited backtracking. In particular, patterns written without grammatical rules\nalways have a worst-case time `O(n^k)` (and space `O(k)`, which is constant for\na given pattern), where `k` is the pattern's star height. Moreover, NPeg has a\nsimple and clear performance model that allows programmers to understand and\npredict the time complexity of their patterns. The model also provides a firm\nbasis for pattern optimizations.\n\n(Adapted from Ierusalimschy, \"A Text Pattern-Matching Tool based on Parsing\nExpression Grammars\", 2008)\n\n\n### End of string\n\nPEGs do not care what is in the subject string after the matching succeeds. For\nexample, the rule `\"foo\"` happily matches the string `\"foobar\"`. To make sure\nthe pattern matches the end of string, this has to be made explicit in the\npattern.\n\nThe idiomatic notation for this is `!1`, meaning \"only succeed if there is not\na single character left to match\" - which is only true for the end of the\nstring.\n\n\n### Non-consuming atoms and captures\n\nThe lookahead(`&`) and not(`!`) operators may not consume any input, and make\nsure that after matching the internal parsing state of the parser is reset to\nas is was before the operator was started, including the state of the captures.\nThis means that any captures made inside a `&` and `!` block also are\ndiscarded. 
It is possible however to capture the contents of a non-consuming\nblock with a code block capture, as these are _always_ executed, even when the\nparser state is rolled back afterwards.\n\n\n### Left recursion\n\nNPeg does not support left recursion (this applies to PEGs in general). For\nexample, the rule\n\n```nim\nA <- A | 'a'\n```\n\nwill cause an infinite loop because it allows for left-recursion of the\nnon-terminal `A`.\n\nSimilarly, the grammar\n\n```nim\nA <- B | 'a' A\nB <- A\n```\n\nis problematic because it is mutually left-recursive through the non-terminal\n`B`.\n\nNote that loops of patterns that can match the empty string will not result in\nthe expected behavior. For example, the rule `*0` will cause the parser to\nstall and go into an infinite loop.\n\n\n### UTF-8 / Unicode\n\nNPeg has no built-in support for Unicode or UTF-8, instead is simply able to\nparse UTF-8 documents just as like any other string. NPeg comes with a simple\nUTF-8 grammar library which should simplify common operations like matching a\nsingle code point or character class. 
The following grammar splits an UTF-8\ndocument into separate characters/glyphs by using the `utf8.any` rule:\n\n```nim\nimport npeg/lib/utf8\n\nlet p = peg \"line\":\n  line <- +char\n  char <- >utf8.any\n\nlet r = p.match(\"γνωρίζω\")\necho r.captures()   # --> @[\"γ\", \"ν\", \"ω\", \"ρ\", \"ί\", \"ζ\", \"ω\"]\n```\n\n\n## Tracing and debugging\n\n### Syntax diagrams\n\nWhen compiled with `-d:npegGraph`, NPeg will dump \n[syntax diagrams](https://en.wikipedia.org/wiki/Syntax_diagram)\n(also known as railroad diagrams) for all parsed rules.\n\nSyntax diagrams are sometimes helpful to understand or debug a grammar, or to\nget more insight in a grammars' complexity.\n\n```\n                              ╭─────────»──────────╮                     \n                              │      ╭─────»──────╮│                     \n                ╭╶╶╶╶╶╶╶╶╶╶╮  │      │  ━━━━      ││         ╭╶╶╶╶╶╶╶╮   \ninf o──\"INF:\"─»───[number]───»┴─\",\"─»┴┬─[lf]─»─1─┬┴┴»─[lf]─»───[url]────o\n                ╰╶╶╶╶╶╶╶╶╶╶╯          ╰────«─────╯           ╰╶╶╶╶╶╶╶╯   \n```\n\n* Optionals (`?`) are indicated by a forward arrow overhead.\n* Repeats ('+') are indicated by a backwards arrow underneath.\n* Literals (strings, chars, sets) are printed in purple.\n* Non-terminals are printed in cyan between square brackets.\n* Not-predicates (`!`) are overlined in red. Note that the diagram does not\n  make it clear that the input for not-predicates is not consumed.\n* Captures are boxed in a gray rectangle, optionally including the capture\n  name.\n\n[Here](/doc/example-railroad.png) is a a larger example of an URL parser.\n\n### Grammar graphs\n\nNPeg can generate a graphical representation of a grammar to show the relations\nbetween rules. 
The generated output is a `.dot` file which can be processed by\nthe Graphviz tool to generate an actual image file.\n\nWhen compiled with `-d:npegDotDir=<PATH>`, NPeg will generate a `.dot` file for\neach grammar in the code and write it to the given directory.\n\n![graph](/doc/example-graph.png)\n\n* Edge colors represent the rule relation:\n  grey=inline, blue=call, green=builtin\n\n* Rule colors represent the relative size/complexity of a rule:\n  black=<10, orange=10..100, red=>100\n\nLarge rules result in larger generated code and slow compile times. Rule size\ncan generally be decreased by changing the rule order in a grammar to allow\nNPeg to call rules instead of inlining them.\n\n\n### Tracing\n\nWhen compiled with `-d:npegTrace`, NPeg will dump its intermediate\nrepresentation of the compiled PEG, and will dump a trace of the execution\nduring matching. These traces can be used for debugging or optimization of a\ngrammar.\n\nFor example, the following program:\n\n```nim\nlet parser = peg \"line\":\n  space <- ' '\n  line <- word * *(space * word)\n  word <- +{'a'..'z'}\n\ndiscard parser.match(\"one two\")\n```\n\nwill output the following intermediate representation at compile time. From\nthe IR it can be seen that the `space` rule has been inlined in the `line`\nrule, but that the `word` rule has been emitted as a subroutine which gets\ncalled from `line`:\n\n```\nline:\n   0: line           opCall 6 word        word\n   1: line           opChoice 5           *(space * word)\n   2:  space         opStr \" \"            ' '\n   3: line           opCall 6 word        word\n   4: line           opPartCommit 2       *(space * word)\n   5:                opReturn\n\nword:\n   6: word           opSet '{'a'..'z'}'   {'a' .. 'z'}\n   7: word           opSpan '{'a'..'z'}'  +{'a' .. 'z'}\n   8:                opReturn\n```\n\nAt runtime, the following trace is generated. The trace consists of a number\nof columns:\n\n1. 
The current instruction pointer, which maps to the compile time dump.\n2. The index into the subject.\n3. The substring of the subject.\n4. The name of the rule from which this instruction originated.\n5. The instruction being executed.\n6. The backtrace stack depth.\n\n```\n  0|  0|one two                 |line           |call -> word:6                          |\n  6|  0|one two                 |word           |set {'a'..'z'}                          |\n  7|  1|ne two                  |word           |span {'a'..'z'}                         |\n  8|  3| two                    |               |return                                  |\n  1|  3| two                    |line           |choice -> 5                             |\n  2|  3| two                    | space         |chr \" \"                                 |*\n  3|  4|two                     |line           |call -> word:6                          |*\n  6|  4|two                     |word           |set {'a'..'z'}                          |*\n  7|  5|wo                      |word           |span {'a'..'z'}                         |*\n  8|  7|                        |               |return                                  |*\n  4|  7|                        |line           |pcommit -> 2                            |*\n  2|  7|                        | space         |chr \" \"                                 |*\n   |  7|                        |               |fail                                    |*\n  5|  7|                        |               |return (done)                           |\n```\n\nThe exact meaning of the IR instructions is not discussed here.\n\n\n## Compile-time configuration\n\nNPeg has a number of configurable setting which can be configured at compile\ntime by passing flags to the compiler. 
The default values should be ok in most\ncases, but if you ever run into one of those limits you are free to configure\nthose to your liking:\n\n* `-d:npegPattMaxLen=N` This is the maximum allowed length of NPeg's internal\n  representation of a parser, before it gets translated to Nim code. The reason\n  to check for an upper limit is that some grammars can grow exponentially by\n  inlining of patterns, resulting in slow compile times and oversized\n  executable size. (default: 4096)\n\n* `-d:npegInlineMaxLen=N` This is the maximum allowed length of a pattern to be\n  inlined. Inlining generally results in a faster parser, but also increases\n  code size. It is valid to set this value to 0; in that case NPeg will never\n  inline patterns and use a calling mechanism instead, this will result in the\n  smallest code size. (default: 50)\n\n* `-d:npegRetStackSize=N` Maximum allowed depth of the return stack for the\n  parser. The default value should be high enough for practical purposes, the\n  stack depth is only limited to detect invalid grammars. (default: 1024)\n\n* `-d:npegBackStackSize=N` Maximum allowed depth of the backtrace stack for the\n  parser. The default value should be high enough for practical purposes, the\n  stack depth is only limited to detect invalid grammars. (default: 1024)\n\n* `-d:npegGcsafe` This is a workaround for the case where NPeg needs to be used\n  from a `{.gcsafe.}` context when using threads. This will mark the generated\n  matching function to be `{.gcsafe.}`.\n\n\n## Tracing and debugging\n\nNPeg has a number of compile time flags to enable tracing and debugging of the\ngenerated parser:\n\n* `-d:npegTrace`: Enable compile time and run time tracing. Please refer to the \n  section 'Tracing' for more details.\n\n* `-d:npegGraph`: Dump syntax diagrams of all parsed rules at compile time.\n\nThese flags are meant for debugging NPeg itself, and are typically not useful\nto the end user:\n\n* `-d:npegDebug`: Enable more debug info. 
Meant for NPeg development debugging\n  purposes only.\n\n* `-d:npegExpand`: Dump the generated Nim code for all parsers defined in the\n  program. Meant for NPeg development debugging purposes only.\n\n* `-d:npegStacktrace`: When enabled, NPeg will dump a stack trace of the\n  current position in the parser when an exception is thrown by NPeg itself or\n  by Nim code in code block captures.\n\n\n## Random stuff and frequently asked questions\n\n\n### Why does NPeg not support regular PEG syntax?\n\nThe NPeg syntax is similar, but not exactly the same as the official PEG\nsyntax: it uses some different operators, and prefix instead of postfix\noperators. The reason for this is that the NPeg grammar is parsed by a Nim\nmacro in order to allow code block captures to embed Nim code, which puts some\nlimitations on the available syntax. Also, NPeg's operators are chosen so that\nthey have the right precedence for PEGs.\n\nThe result is that the grammar itself is expressed as valid Nim, which has the\nnice side effect of allowing syntax highlighting and code completion work with\nyour favorite editor.\n\n\n### Can NPeg be used to parse EBNF grammars?\n\nAlmost, but not quite. Although PEGS and EBNF look quite similar, there are\nsome subtle but important differences which do not allow a literal translation\nfrom EBNF to PEG. Notable differences are left recursion and ordered choice.\nAlso, see \"From EBNF to PEG\" from Roman R. Redziejowski.\n\n\n### NPeg and generic functions\n\nNim's macro system is sometimes finicky and not well defined, and NPeg seems to\npush it to the limit. 
This means that you might run into strange and\nunexpected issues, especially when mixing NPeg with generic code.\n\nIf you run into weird error messages that do not seem to make sense when using\nNPeg from generic procs, check the links below for more information and\npossible workarounds:\n\n- https://github.com/nim-lang/Nim/issues/22740\n- https://github.com/zevv/npeg/issues/68\n\n\n## Examples\n\n### Parsing arithmetic expressions\n\n```nim\nlet parser = peg \"line\":\n  exp      <- term   * *( ('+'|'-') * term)\n  term     <- factor * *( ('*'|'/') * factor)\n  factor   <- +{'0'..'9'} | ('(' * exp * ')')\n  line     <- exp * !1\n\ndoAssert parser.match(\"3*(4+15)+2\").ok\n```\n\n\n### A complete JSON parser\n\nThe following PEG defines a complete parser for the JSON language - it will not\nproduce any captures, but simple traverse and validate the document:\n\n```nim\nlet s = peg \"doc\":\n  S              <- *Space\n  jtrue          <- \"true\"\n  jfalse         <- \"false\"\n  jnull          <- \"null\"\n\n  unicodeEscape  <- 'u' * Xdigit[4]\n  escape         <- '\\\\' * ({ '{', '\"', '|', '\\\\', 'b', 'f', 'n', 'r', 't' } | unicodeEscape)\n  stringBody     <- ?escape * *( +( {'\\x20'..'\\xff'} - {'\"'} - {'\\\\'}) * *escape)\n  jstring         <- ?S * '\"' * stringBody * '\"' * ?S\n\n  minus          <- '-'\n  intPart        <- '0' | (Digit-'0') * *Digit\n  fractPart      <- \".\" * +Digit\n  expPart        <- ( 'e' | 'E' ) * ?( '+' | '-' ) * +Digit\n  jnumber         <- ?minus * intPart * ?fractPart * ?expPart\n\n  doc            <- JSON * !1\n  JSON           <- ?S * ( jnumber | jobject | jarray | jstring | jtrue | jfalse | jnull ) * ?S\n  jobject        <- '{' * ( jstring * \":\" * JSON * *( \",\" * jstring * \":\" * JSON ) | ?S ) * \"}\"\n  jarray         <- \"[\" * ( JSON * *( \",\" * JSON ) | ?S ) * \"]\"\n\ndoAssert s.match(json).ok\n\nlet doc = \"\"\" {\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 1} \"\"\"\ndoAssert 
parser.match(doc).ok\n```\n\n\n### Captures\n\nThe following example shows how to use code block captures. The defined\ngrammar will parse a HTTP response document and extract structured data from\nthe document into a Nim object:\n\n```nim\nimport npeg, strutils, tables\n\ntype\n  Request = object\n    proto: string\n    version: string\n    code: int\n    message: string\n    headers: Table[string, string]\n\n# HTTP grammar (simplified)\n\nlet parser = peg(\"http\", userdata: Request):\n  space       <- ' '\n  crlf        <- '\\n' * ?'\\r'\n  url         <- +(Alpha | Digit | '/' | '_' | '.')\n  eof         <- !1\n  header_name <- +(Alpha | '-')\n  header_val  <- +(1-{'\\n'}-{'\\r'})\n  proto       <- >+Alpha:\n    userdata.proto = $1\n  version     <- >(+Digit * '.' * +Digit):\n    userdata.version = $1\n  code        <- >+Digit:\n    userdata.code = parseInt($1)\n  msg         <- >(+(1 - '\\r' - '\\n')):\n    userdata.message = $1\n  header      <- >header_name * \": \" * >header_val:\n    userdata.headers[$1] = $2\n  response    <- proto * '/' * version * space * code * space * msg\n  headers     <- *(header * crlf)\n  http        <- response * crlf * headers * eof\n\n\n# Parse the data and print the resulting table\n\nconst data = \"\"\"\nHTTP/1.1 301 Moved Permanently\nContent-Length: 162\nContent-Type: text/html\nLocation: https://nim.org/\n\"\"\"\n\nvar request: Request\nlet res = parser.match(data, request)\necho request\n```\n\nThe resulting data:\n\n```nim\n(\n  proto: \"HTTP\",\n  version: \"1.1\",\n  code: 301,\n  message: \"Moved Permanently\",\n  headers: {\n    \"Content-Length\": \"162\",\n    \"Content-Type\":\n    \"text/html\",\n    \"Location\": \"https://nim.org/\"\n  }\n)\n```\n\n\n### More examples\n\nMore examples can be found in tests/examples.nim.\n\n\n## Future directions / Todos / Roadmap / The long run\n\nHere are some things I'd like to have implemented one day. Some are hard and\nrequire me to better understand what I'm doing first. 
In no particular order:\n\n- Handling left recursion: PEGs are typically not good at handling grammar\n  invoking left recursion, see \n  https://en.wikipedia.org/wiki/Parsing_expression_grammar#Indirect_left_recursion\n  for an explanation of the problem. However, some smart people have found a way\n  to make this work anyway, but I am not yet able to understand this well enough\n  to implement this in NPeg.\n  https://github.com/zevv/npeg/blob/master/doc/papers/Left_recursion_in_parsing_expression_grammars.pdf\n\n- Design and implement a proper API for code block captures. The current API\n  feels fragile and fragmented (`capture[], $1/$2, fail(), validate()`), and\n  does not offer solid primitives to make custom match functions yet, something\n  better should be in place before NPeg goes v1.0.\n\n- Resuming/streaming: The current parser is almost ready to be invoked multiple\n  times, resuming parsing where it left off - this should allow parsing of\n  (infinite) streams. The only problem not solved yet is how to handle\n  captures: when a block of data is parsed it might contain data which must\n  later be available to collect the capture. Not sure how to handle this yet.\n\n- Memoization: I guess it would be possible to add (limited) memoization to \n  improve performance, but no clue where to start yet.\n\n- Parallelization: I wonder if parsing can parallelized: when reaching an\n  ordered choice, multiple threads should be able to try to parse each\n  individual choice. I do see problems with captures here, though.\n\n- I'm not happy about the `{.gcsafe.}` workaround. I'd be happy to hear any\n  ideas on how to improve this.\n\n"
  },
  {
    "path": "config.nims",
    "content": "--styleCheck:usages\nif (NimMajor, NimMinor) < (1, 6):\n  --styleCheck:hint\nelse:\n  --styleCheck:error\n"
  },
  {
    "path": "doc/README.md",
    "content": "\nThis directory contains various papers which were used for inspiration when\nbuilding Npeg.\n"
  },
  {
    "path": "doc/papers/README.md",
    "content": "\nThis is a collection of papers somehow relevant to NPeg.\n"
  },
  {
    "path": "misc/README",
    "content": "\nThis directory contains various snippets, examples or other helpful things\nthat I want to keep around but do not fit in elsewhere.\n"
  },
  {
    "path": "misc/indent.nim",
    "content": "  # Indent syntax\n\n  let data = \"\"\"\na=123\nb=\n  c=567\n  e=42\nf=18\ng=\n  b=44\n  c=22\n\"\"\"\n\n  var indentStack = @[\"\"]\n  template top[T](s: seq[T]): T = s[s.high]\n\n\n  let p = peg doc:\n    doc <- pairs * !1\n    pairs <- pair * *('\\n' * pair)\n    pair <- indSame * key * '=' * val\n    indentPairs <- '\\n' * &indIn * pairs * &('\\n' * indOut)\n    key <- +Alpha:\n      echo \"key \", $0\n    number <- +Digit:\n      echo \"val \", $0\n    val <- number | indentPairs\n\n    indSame <- *' ':\n      validate $0 == indentStack.top\n\n    indIn <- *' ':\n      validate len($0) > len(indentStack.top)\n      indentStack.add $0\n    \n    indOut <- *' ':\n      discard indentStack.pop\n      validate $0 == indentStack.top\n\n  echo p.match(data).ok\n"
  },
  {
    "path": "misc/java.nim",
    "content": "\n#\n# This grammar has been auto-generated with mouse2npeg from the Mouse Java-1.6\n# grammar at http://www.romanredz.se/Mouse/Java.1.6.peg. It is not nice to look\n# at, but it does parse Java\n# \n\nimport npeg\nlet r = peg CompilationUnit:\n  CompilationUnit <- Spacing * ?PackageDeclaration * *ImportDeclaration * *TypeDeclaration * EOT\n  PackageDeclaration <- *Annotation * PACKAGE * QualifiedIdentifier * SEMI\n  ImportDeclaration <- IMPORT * ?STATIC * QualifiedIdentifier * ?( DOT * STAR ) * SEMI\n  TypeDeclaration <- *Modifier * ( ClassDeclaration | EnumDeclaration | InterfaceDeclaration | AnnotationTypeDeclaration ) | SEMI\n  ClassDeclaration <- CLASS * Identifier * ?TypeParameters * ?( EXTENDS * ClassType ) * ?( IMPLEMENTS * ClassTypeList ) * ClassBody\n  ClassBody <- LWING * *ClassBodyDeclaration * RWING\n  ClassBodyDeclaration <- SEMI | ?STATIC * Block | *Modifier * MemberDecl\n  MemberDecl <- TypeParameters * GenericMethodOrConstructorRest | Type * Identifier * MethodDeclaratorRest | Type * VariableDeclarators * SEMI | VOID * Identifier * VoidMethodDeclaratorRest | Identifier * ConstructorDeclaratorRest | InterfaceDeclaration | ClassDeclaration | EnumDeclaration | AnnotationTypeDeclaration\n  GenericMethodOrConstructorRest <- ( Type | VOID ) * Identifier * MethodDeclaratorRest | Identifier * ConstructorDeclaratorRest\n  MethodDeclaratorRest <- FormalParameters * *Dim * ?( THROWS * ClassTypeList ) * ( MethodBody | SEMI )\n  VoidMethodDeclaratorRest <- FormalParameters * ?( THROWS * ClassTypeList ) * ( MethodBody | SEMI )\n  ConstructorDeclaratorRest <- FormalParameters * ?( THROWS * ClassTypeList ) * MethodBody\n  MethodBody <- Block\n  InterfaceDeclaration <- INTERFACE * Identifier * ?TypeParameters * ?( EXTENDS * ClassTypeList ) * InterfaceBody\n  InterfaceBody <- LWING * *InterfaceBodyDeclaration * RWING\n  InterfaceBodyDeclaration <- *Modifier * InterfaceMemberDecl | SEMI\n  InterfaceMemberDecl <- InterfaceMethodOrFieldDecl | 
InterfaceGenericMethodDecl | VOID * Identifier * VoidInterfaceMethodDeclaratorRest | InterfaceDeclaration | AnnotationTypeDeclaration | ClassDeclaration | EnumDeclaration\n  InterfaceMethodOrFieldDecl <- Type * Identifier * InterfaceMethodOrFieldRest\n  InterfaceMethodOrFieldRest <- ConstantDeclaratorsRest * SEMI | InterfaceMethodDeclaratorRest\n  InterfaceMethodDeclaratorRest <- FormalParameters * *Dim * ?( THROWS * ClassTypeList ) * SEMI\n  InterfaceGenericMethodDecl <- TypeParameters * ( Type | VOID ) * Identifier * InterfaceMethodDeclaratorRest\n  VoidInterfaceMethodDeclaratorRest <- FormalParameters * ?( THROWS * ClassTypeList ) * SEMI\n  ConstantDeclaratorsRest <- ConstantDeclaratorRest * *( COMMA * ConstantDeclarator )\n  ConstantDeclarator <- Identifier * ConstantDeclaratorRest\n  ConstantDeclaratorRest <- *Dim * EQU * VariableInitializer\n  EnumDeclaration <- ENUM * Identifier * ?( IMPLEMENTS * ClassTypeList ) * EnumBody\n  EnumBody <- LWING * ?EnumConstants * ?COMMA * ?EnumBodyDeclarations * RWING\n  EnumConstants <- EnumConstant * *( COMMA * EnumConstant )\n  EnumConstant <- *Annotation * Identifier * ?Arguments * ?ClassBody\n  EnumBodyDeclarations <- SEMI * *ClassBodyDeclaration\n  LocalVariableDeclarationStatement <- *( FINAL | Annotation ) * Type * VariableDeclarators * SEMI\n  VariableDeclarators <- VariableDeclarator * *( COMMA * VariableDeclarator )\n  VariableDeclarator <- Identifier * *Dim * ?( EQU * VariableInitializer )\n  FormalParameters <- LPAR * ?FormalParameterList * RPAR\n  FormalParameter <- *( FINAL | Annotation ) * Type * VariableDeclaratorId\n  LastFormalParameter <- *( FINAL | Annotation ) * Type * ELLIPSIS * VariableDeclaratorId\n  FormalParameterList <- FormalParameter * *( COMMA * FormalParameter ) * ?( COMMA * LastFormalParameter ) | LastFormalParameter\n  VariableDeclaratorId <- Identifier * *Dim\n  Block <- LWING * BlockStatements * RWING\n  BlockStatements <- *BlockStatement\n  BlockStatement <- 
LocalVariableDeclarationStatement | *Modifier * ( ClassDeclaration | EnumDeclaration ) | Statement\n  Statement <- Block | ASSERT * Expression * ?( COLON * Expression ) * SEMI | IF * ParExpression * Statement * ?( ELSE * Statement ) | FOR * LPAR * ?ForInit * SEMI * ?Expression * SEMI * ?ForUpdate * RPAR * Statement | FOR * LPAR * FormalParameter * COLON * Expression * RPAR * Statement | WHILE * ParExpression * Statement | DO * Statement * WHILE * ParExpression * SEMI | TRY * Block * ( +Catch * ?Finally | Finally ) | SWITCH * ParExpression * LWING * SwitchBlockStatementGroups * RWING | SYNCHRONIZED * ParExpression * Block | RETURN * ?Expression * SEMI | THROW * Expression * SEMI | BREAK * ?Identifier * SEMI | CONTINUE * ?Identifier * SEMI | SEMI | StatementExpression * SEMI | Identifier * COLON * Statement\n  Catch <- CATCH * LPAR * FormalParameter * RPAR * Block\n  Finally <- FINALLY * Block\n  SwitchBlockStatementGroups <- *SwitchBlockStatementGroup\n  SwitchBlockStatementGroup <- SwitchLabel * BlockStatements\n  SwitchLabel <- CASE * ConstantExpression * COLON | CASE * EnumConstantName * COLON | DEFAULT * COLON\n  ForInit <- *( FINAL | Annotation ) * Type * VariableDeclarators | StatementExpression * *( COMMA * StatementExpression )\n  ForUpdate <- StatementExpression * *( COMMA * StatementExpression )\n  EnumConstantName <- Identifier\n  StatementExpression <- Expression\n  ConstantExpression <- Expression\n  Expression <- ConditionalExpression * *( AssignmentOperator * ConditionalExpression )\n  AssignmentOperator <- EQU | PLUSEQU | MINUSEQU | STAREQU | DIVEQU | ANDEQU | OREQU | HATEQU | MODEQU | SLEQU | SREQU | BSREQU\n  ConditionalExpression <- ConditionalOrExpression * *( QUERY * Expression * COLON * ConditionalOrExpression )\n  ConditionalOrExpression <- ConditionalAndExpression * *( OROR * ConditionalAndExpression )\n  ConditionalAndExpression <- InclusiveOrExpression * *( ANDAND * InclusiveOrExpression )\n  InclusiveOrExpression <- ExclusiveOrExpression * 
*( OR * ExclusiveOrExpression )\n  ExclusiveOrExpression <- AndExpression * *( HAT * AndExpression )\n  AndExpression <- EqualityExpression * *( AND * EqualityExpression )\n  EqualityExpression <- RelationalExpression * *( ( EQUAL | NOTEQUAL ) * RelationalExpression )\n  RelationalExpression <- ShiftExpression * *( ( LE | GE | LT | GT ) * ShiftExpression | INSTANCEOF * ReferenceType )\n  ShiftExpression <- AdditiveExpression * *( ( SL | SR | BSR ) * AdditiveExpression )\n  AdditiveExpression <- MultiplicativeExpression * *( ( PLUS | MINUS ) * MultiplicativeExpression )\n  MultiplicativeExpression <- UnaryExpression * *( ( STAR | DIV | MOD ) * UnaryExpression )\n  UnaryExpression <- PrefixOp * UnaryExpression | LPAR * Type * RPAR * UnaryExpression | Primary * *( Selector ) * *( PostfixOp )\n  Primary <- ParExpression | NonWildcardTypeArguments * ( ExplicitGenericInvocationSuffix | THIS * Arguments ) | THIS * ?Arguments | SUPER * SuperSuffix | Literal | NEW * Creator | QualifiedIdentifier * ?IdentifierSuffix | BasicType * *Dim * DOT * CLASS | VOID * DOT * CLASS\n  IdentifierSuffix <- LBRK * ( RBRK * *Dim * DOT * CLASS | Expression * RBRK ) | Arguments | DOT * ( CLASS | ExplicitGenericInvocation | THIS | SUPER * Arguments | NEW * ?NonWildcardTypeArguments * InnerCreator )\n  ExplicitGenericInvocation <- NonWildcardTypeArguments * ExplicitGenericInvocationSuffix\n  NonWildcardTypeArguments <- LPOINT * ReferenceType * *( COMMA * ReferenceType ) * RPOINT\n  ExplicitGenericInvocationSuffix <- SUPER * SuperSuffix | Identifier * Arguments\n  PrefixOp <- INC | DEC | BANG | TILDA | PLUS | MINUS\n  PostfixOp <- INC | DEC\n  Selector <- DOT * Identifier * ?Arguments | DOT * ExplicitGenericInvocation | DOT * THIS | DOT * SUPER * SuperSuffix | DOT * NEW * ?NonWildcardTypeArguments * InnerCreator | DimExpr\n  SuperSuffix <- Arguments | DOT * ?NonWildcardTypeArguments * Identifier * ?Arguments\n  BasicType <- ( \"byte\" | \"short\" | \"char\" | \"int\" | \"long\" | \"float\" | 
\"double\" | \"boolean\" ) * !LetterOrDigit * Spacing\n  Arguments <- LPAR * ?( Expression * *( COMMA * Expression ) ) * RPAR\n  Creator <- ?NonWildcardTypeArguments * CreatedName * ClassCreatorRest | ?NonWildcardTypeArguments * ( ClassType | BasicType ) * ArrayCreatorRest\n  CreatedName <- Identifier * ?NonWildcardTypeArguments * *( DOT * Identifier * ?NonWildcardTypeArguments )\n  InnerCreator <- Identifier * ClassCreatorRest\n  ArrayCreatorRest <- LBRK * ( RBRK * *Dim * ArrayInitializer | Expression * RBRK * *DimExpr * *Dim )\n  ClassCreatorRest <- Arguments * ?ClassBody\n  ArrayInitializer <- LWING * ?( VariableInitializer * *( COMMA * VariableInitializer ) ) * ?COMMA * RWING\n  VariableInitializer <- ArrayInitializer | Expression\n  ParExpression <- LPAR * Expression * RPAR\n  QualifiedIdentifier <- Identifier * *( DOT * Identifier )\n  Dim <- LBRK * RBRK\n  DimExpr <- LBRK * Expression * RBRK\n  Type <- ( BasicType | ClassType ) * *Dim\n  ReferenceType <- BasicType * +Dim | ClassType * *Dim\n  ClassType <- Identifier * ?TypeArguments * *( DOT * Identifier * ?TypeArguments )\n  ClassTypeList <- ClassType * *( COMMA * ClassType )\n  TypeArguments <- LPOINT * TypeArgument * *( COMMA * TypeArgument ) * RPOINT\n  TypeArgument <- ReferenceType | QUERY * ?( ( EXTENDS | SUPER ) * ReferenceType )\n  TypeParameters <- LPOINT * TypeParameter * *( COMMA * TypeParameter ) * RPOINT\n  TypeParameter <- Identifier * ?( EXTENDS * Bound )\n  Bound <- ClassType * *( AND * ClassType )\n  Modifier <- Annotation | ( \"public\" | \"protected\" | \"private\" | \"static\" | \"abstract\" | \"final\" | \"native\" | \"synchronized\" | \"transient\" | \"volatile\" | \"strictfp\" ) * !LetterOrDigit * Spacing\n  AnnotationTypeDeclaration <- AT * INTERFACE * Identifier * AnnotationTypeBody\n  AnnotationTypeBody <- LWING * *AnnotationTypeElementDeclaration * RWING\n  AnnotationTypeElementDeclaration <- *Modifier * AnnotationTypeElementRest | SEMI\n  AnnotationTypeElementRest <- Type * 
AnnotationMethodOrConstantRest * SEMI | ClassDeclaration | EnumDeclaration | InterfaceDeclaration | AnnotationTypeDeclaration\n  AnnotationMethodOrConstantRest <- AnnotationMethodRest | AnnotationConstantRest\n  AnnotationMethodRest <- Identifier * LPAR * RPAR * ?DefaultValue\n  AnnotationConstantRest <- VariableDeclarators\n  DefaultValue <- DEFAULT * ElementValue\n  Annotation <- NormalAnnotation | SingleElementAnnotation | MarkerAnnotation\n  NormalAnnotation <- AT * QualifiedIdentifier * LPAR * ?ElementValuePairs * RPAR\n  SingleElementAnnotation <- AT * QualifiedIdentifier * LPAR * ElementValue * RPAR\n  MarkerAnnotation <- AT * QualifiedIdentifier\n  ElementValuePairs <- ElementValuePair * *( COMMA * ElementValuePair )\n  ElementValuePair <- Identifier * EQU * ElementValue\n  ElementValue <- ConditionalExpression | Annotation | ElementValueArrayInitializer\n  ElementValueArrayInitializer <- LWING * ?ElementValues * ?COMMA * RWING\n  ElementValues <- ElementValue * *( COMMA * ElementValue )\n  Spacing <- *( +{' ','\\t','\\r','\\n','\\x0c'} | \"/*\" * *( !\"*/\" * 1 ) * \"*/\" | \"//\" * *( !{'\\r','\\n'} * 1 ) * {'\\r','\\n'} )\n  Identifier <- !Keyword * Letter * *LetterOrDigit * Spacing\n  Letter <- {'a'..'z'} | {'A'..'Z'} | {'_','$'}\n  LetterOrDigit <- {'a'..'z'} | {'A'..'Z'} | {'0'..'9'} | {'_','$'}\n  Keyword <- ( \"abstract\" | \"assert\" | \"boolean\" | \"break\" | \"byte\" | \"case\" | \"catch\" | \"char\" | \"class\" | \"const\" | \"continue\" | \"default\" | \"double\" | \"do\" | \"else\" | \"enum\" | \"extends\" | \"false\" | \"finally\" | \"final\" | \"float\" | \"for\" | \"goto\" | \"if\" | \"implements\" | \"import\" | \"interface\" | \"int\" | \"instanceof\" | \"long\" | \"native\" | \"new\" | \"null\" | \"package\" | \"private\" | \"protected\" | \"public\" | \"return\" | \"short\" | \"static\" | \"strictfp\" | \"super\" | \"switch\" | \"synchronized\" | \"this\" | \"throws\" | \"throw\" | \"transient\" | \"true\" | \"try\" | \"void\" | 
\"volatile\" | \"while\" ) * !LetterOrDigit\n  ASSERT <- \"assert\" * !LetterOrDigit * Spacing\n  BREAK <- \"break\" * !LetterOrDigit * Spacing\n  CASE <- \"case\" * !LetterOrDigit * Spacing\n  CATCH <- \"catch\" * !LetterOrDigit * Spacing\n  CLASS <- \"class\" * !LetterOrDigit * Spacing\n  CONTINUE <- \"continue\" * !LetterOrDigit * Spacing\n  DEFAULT <- \"default\" * !LetterOrDigit * Spacing\n  DO <- \"do\" * !LetterOrDigit * Spacing\n  ELSE <- \"else\" * !LetterOrDigit * Spacing\n  ENUM <- \"enum\" * !LetterOrDigit * Spacing\n  EXTENDS <- \"extends\" * !LetterOrDigit * Spacing\n  FINALLY <- \"finally\" * !LetterOrDigit * Spacing\n  FINAL <- \"final\" * !LetterOrDigit * Spacing\n  FOR <- \"for\" * !LetterOrDigit * Spacing\n  IF <- \"if\" * !LetterOrDigit * Spacing\n  IMPLEMENTS <- \"implements\" * !LetterOrDigit * Spacing\n  IMPORT <- \"import\" * !LetterOrDigit * Spacing\n  INTERFACE <- \"interface\" * !LetterOrDigit * Spacing\n  INSTANCEOF <- \"instanceof\" * !LetterOrDigit * Spacing\n  NEW <- \"new\" * !LetterOrDigit * Spacing\n  PACKAGE <- \"package\" * !LetterOrDigit * Spacing\n  RETURN <- \"return\" * !LetterOrDigit * Spacing\n  STATIC <- \"static\" * !LetterOrDigit * Spacing\n  SUPER <- \"super\" * !LetterOrDigit * Spacing\n  SWITCH <- \"switch\" * !LetterOrDigit * Spacing\n  SYNCHRONIZED <- \"synchronized\" * !LetterOrDigit * Spacing\n  THIS <- \"this\" * !LetterOrDigit * Spacing\n  THROWS <- \"throws\" * !LetterOrDigit * Spacing\n  THROW <- \"throw\" * !LetterOrDigit * Spacing\n  TRY <- \"try\" * !LetterOrDigit * Spacing\n  VOID <- \"void\" * !LetterOrDigit * Spacing\n  WHILE <- \"while\" * !LetterOrDigit * Spacing\n  Literal <- ( FloatLiteral | IntegerLiteral | CharLiteral | StringLiteral | \"true\" * !LetterOrDigit | \"false\" * !LetterOrDigit | \"null\" * !LetterOrDigit ) * Spacing\n  IntegerLiteral <- ( HexNumeral | OctalNumeral | DecimalNumeral ) * ?{'l','L'}\n  DecimalNumeral <- \"0\" | {'1'..'9'} * *{'0'..'9'}\n  HexNumeral <- ( \"0x\" | \"0X\" ) 
* +HexDigit\n  HexDigit <- {'a'..'f'} | {'A'..'F'} | {'0'..'9'}\n  OctalNumeral <- \"0\" * +{'0'..'7'}\n  FloatLiteral <- HexFloat | DecimalFloat\n  DecimalFloat <- +Digit * \".\" * *Digit * ?Exponent * ?{'f','F','d','D'} | \".\" * +Digit * ?Exponent * ?{'f','F','d','D'} | +Digit * Exponent * ?{'f','F','d','D'} | +Digit * ?Exponent * {'f','F','d','D'}\n  Exponent <- {'e','E'} * ?{'+','\\\\','-'} * +Digit\n  Digit <- {'0'..'9'}\n  HexFloat <- HexSignificand * BinaryExponent * ?{'f','F','d','D'}\n  HexSignificand <- ( \"0x\" | \"0X\" ) * *HexDigit * \".\" * +HexDigit | HexNumeral * ?\".\"\n  BinaryExponent <- {'p','P'} * ?{'+','\\\\','-'} * +Digit\n  CharLiteral <- \"\\'\" * ( Escape | !{'\\'','\\\\','\\n','\\r'} * 1 ) * \"\\'\"\n  StringLiteral <- \"\\\"\" * *( Escape | !{'\"','\\\\','\\n','\\r'} * 1 ) * \"\\\"\"\n  Escape <- \"\\\\\" * ( {'b','t','n','f','r','\"','\\'','\\\\'} | OctalEscape | UnicodeEscape )\n  OctalEscape <- {'0'..'3'} * {'0'..'7'} * {'0'..'7'} | {'0'..'7'} * {'0'..'7'} | {'0'..'7'}\n  UnicodeEscape <- +\"u\" * HexDigit * HexDigit * HexDigit * HexDigit\n  AT <- \"@\" * Spacing\n  AND <- \"&\" * !{'=','&'} * Spacing\n  ANDAND <- \"&&\" * Spacing\n  ANDEQU <- \"&=\" * Spacing\n  BANG <- \"!\" * !\"=\" * Spacing\n  BSR <- \">>>\" * !\"=\" * Spacing\n  BSREQU <- \">>>=\" * Spacing\n  COLON <- \":\" * Spacing\n  COMMA <- \",\" * Spacing\n  DEC <- \"--\" * Spacing\n  DIV <- \"/\" * !\"=\" * Spacing\n  DIVEQU <- \"/=\" * Spacing\n  DOT <- \".\" * Spacing\n  ELLIPSIS <- \"...\" * Spacing\n  EQU <- \"=\" * !\"=\" * Spacing\n  EQUAL <- \"==\" * Spacing\n  GE <- \">=\" * Spacing\n  GT <- \">\" * !{'=','>'} * Spacing\n  HAT <- \"^\" * !\"=\" * Spacing\n  HATEQU <- \"^=\" * Spacing\n  INC <- \"++\" * Spacing\n  LBRK <- \"[\" * Spacing\n  LE <- \"<=\" * Spacing\n  LPAR <- \"(\" * Spacing\n  LPOINT <- \"<\" * Spacing\n  LT <- \"<\" * !{'=','<'} * Spacing\n  LWING <- \"{\" * Spacing\n  MINUS <- \"-\" * !{'=','\\\\','-'} * Spacing\n  MINUSEQU <- \"-=\" * Spacing\n 
 MOD <- \"%\" * !\"=\" * Spacing\n  MODEQU <- \"%=\" * Spacing\n  NOTEQUAL <- \"!=\" * Spacing\n  OR <- \"|\" * !{'=','|'} * Spacing\n  OREQU <- \"|=\" * Spacing\n  OROR <- \"||\" * Spacing\n  PLUS <- \"+\" * !{'=','+'} * Spacing\n  PLUSEQU <- \"+=\" * Spacing\n  QUERY <- \"?\" * Spacing\n  RBRK <- \"]\" * Spacing\n  RPAR <- \")\" * Spacing\n  RPOINT <- \">\" * Spacing\n  RWING <- \"}\" * Spacing\n  SEMI <- \";\" * Spacing\n  SL <- \"<<\" * !\"=\" * Spacing\n  SLEQU <- \"<<=\" * Spacing\n  SR <- \">>\" * !{'=','>'} * Spacing\n  SREQU <- \">>=\" * Spacing\n  STAR <- \"*\" * !\"=\" * Spacing\n  STAREQU <- \"*=\" * Spacing\n  TILDA <- \"~\" * Spacing\n  EOT <- !1\n\n"
  },
  {
    "path": "misc/mouse2npeg.nim",
    "content": "#\n# Convert a Mouse PEG grammar into NPeg grammar\n# http://www.romanredz.se/Mouse/\n#\n\nimport npeg\nimport npeg/common\nimport strutils\n\n# Parse the Mouse grammar into an ASTNode tree\n\nlet mouse = peg \"mouse\":\n  mouse     <- A(\"mouse\", *rule) * ?s * !1\n  rule      <- ?s * A(\"rule\", >name * s * \"=\" * s * patt)\n  patt      <- A(\"patt\", choice * ?sem * s * ';')\n  sem       <- ('{' * @'}')\n  choice    <- A(\"choice\", seq * s * *('/' * s * seq))\n  seq       <- A(\"seq\", prefixed * *(s * prefixed) * s)\n  nonterm   <- A(\"nonterm\", >name)\n  prefixed  <- A(\"pre\", ?>'!' * postfixed)\n  postfixed <- A(\"post\", (paren | nonterm | lit) * >?postfix)\n  lit       <- any | range | set | string\n  any       <- A(\"any\", '_')\n  range     <- A(\"range\", '[' * >(char * '-' * char) * ']')\n  set       <- A(\"set\", '[' * +(char-']') * ']')\n  string    <- A(\"string\", '\"' * +(char-'\"') * '\"')\n  paren     <- A(\"paren\", '(' * s * choice * s * ')')\n  postfix   <- {'+','*','?'}\n  name      <- +Alpha\n  char      <- A(\"char\", >( (\"\\\\u\" * Xdigit[4]) | ('\\\\' * {'\\\\','r','n','t','\"'}) | 1))\n  nl        <- {'\\r','\\n'}\n  s         <- *( +Space | comment | sem )\n  comment   <- \"//\" * >*(1-nl)\n\n\n# Dump the PEG ast tree into NPeg form\n\nproc dump(a: ASTNode): string =\n  proc unescapeChar(s: string): string =\n    if s == \"'\":\n      result = \"\\\\'\"\n    elif s == \"\\\\\":\n      result = \"\\\\\\\\\"\n    elif s.len == 6:\n      result = $(parseHexInt(s[2..5]).char.escapeChar)\n    else:\n      result = s\n  case a.id:\n    of \"mouse\":\n      for c in a:\n        result.add dump(c)\n    of \"rule\":\n      return \"  \" & $a.val & \" <- \" & dump(a[\"patt\"]) & \"\\n\"\n    of \"patt\":\n      return dump a[0]\n    of \"choice\":\n      var parts: seq[string]\n      for c in a:\n        parts.add dump(c)\n      return parts.join(\" | \")\n    of \"seq\":\n      var parts: seq[string]\n      for c in a:\n     
   parts.add dump(c)\n      return parts.join(\" * \")\n    of \"paren\":\n      return \"( \" & dump(a[0]) & \" )\"\n    of \"pre\":\n      return a.val & dump(a[0])\n    of \"post\":\n      return a.val & dump(a[0])\n    of \"nonterm\":\n      return a.val\n    of \"any\":\n      return \"1\"\n    of \"string\":\n      result.add '\"'\n      for c in a:\n        result.add unescapeChar(c.val)\n      result.add '\"'\n    of \"set\":\n      var cs: seq[string]\n      for c in a: cs.add unescapeChar(c.val)\n      return \"{'\" & cs.join(\"','\") & \"'}\"\n    of \"range\":\n      return \"{'\" & escapeChar(a.val[0]) & \"'..'\" & escapeChar(a.val[2]) & \"'}\"\n    else:\n      echo \"\\nUnhnandled \" & a.id\n      quit 1\n\n\n# http://www.romanredz.se/Mouse/Java.1.6.peg\n\nlet r = mouse.matchFile(\"/tmp/Java.1.6.peg\")\nif not r.ok:\n  echo \"Error parsing at \", r.matchMax\n  quit 1\n\necho \"import npeg\"\necho \"let r = peg CompilationUnit:\"\n\necho dump(r.capturesAst())\n\n"
  },
  {
    "path": "misc/rod.nim",
    "content": "import npeg\nimport strutils\n\n# Rod AST node types\n\ntype\n  NodeKind* = enum\n    nkEmpty\n    nkScript, nkBlock\n    nkBool, nkNumber, nkString, nkIdent\n    nkPrefix, nkInfix, nkDot, nkIndex\n    nkVar, nkLet\n    nkIf, nkWhile, nkFor\n    nkBreak, nkContinue\n    nkCall\n    nkGeneric\n    nkObject, nkObjFields, nkObjConstr\n  Node* = ref object\n    ln*, col*: int\n    file*: string\n    case kind*: NodeKind\n    of nkEmpty: discard\n    of nkBool:\n      boolVal*: bool\n    of nkNumber:\n      numberVal*: float\n    of nkString:\n      stringVal*: string\n    of nkIdent:\n      ident*: string\n    else:\n      children*: seq[Node]\n\ntype\n  ParseStack = seq[Node]\n\n\n# Pretty printing\n\nproc `$`*(node: Node, showLineInfo = false): string =\n  const LeafNodes = { nkEmpty, nkBool, nkNumber, nkString, nkIdent, nkPrefix, nkInfix }\n  case node.kind\n  of nkEmpty: result = \"<empty>\"\n  of nkBool: result = $node.boolVal\n  of nkNumber: result = $node.numberVal\n  of nkString: result = escape(node.stringVal)\n  of nkIdent: result = node.ident\n  else:\n    result = (if showLineInfo: $node.ln & \":\" & $node.col & \" \" else: \"\") &\n             \"(\" & (case node.kind\n                    of nkPrefix, nkInfix: \"\"\n                    else: $node.kind & \" \")\n    for i, child in node.children:\n      if child.kind notin LeafNodes and node.children.len > 1:\n        result.add(\"\\n\")\n        result.add(indent(`$`(child, showLineInfo), 2))\n      else:\n        if i > 0:\n          result.add(\" \")\n        result.add(`$`(child, showLineInfo))\n    result.add(\")\")\n\nproc `$`*(ps: ParseStack): string =\n  for i, n in ps:\n    result &= $i & \":\\n\" & $n & \"\\n\"\n  result &= \"\\n\"\n\n\n\nproc addToParent(ps: var ParseStack, ns: varargs[Node]) =\n  ps[ps.high].children.add ns\n\nproc swap(ps: var ParseStack) =\n  ps.add ps[ps.high-1]\n  ps.delete ps.high-2\n\nlet p = peg(rod, ps: ParseStack):\n\n  S <- *Space\n\n  # Basic 
tokens\n\n  tokColon    <- \":\" * S\n  tokEquals   <- \"=\" * S\n  tokComma    <- \",\" * S\n  tokPlus     <- \"+\" * S\n  tokMinus    <- \"-\" * S\n  tokMul      <- \"*\" * S\n  tokDiv      <- \"/\" * S\n  tokParOpen  <- \"(\" * S\n  tokParClose <- \")\" * S\n  tokCurOpen  <- \"{\" * S\n  tokCurClose <- \"}\" * S\n  tokVar      <- \"var\" * S\n  tokLet      <- \"let\" * S\n  tokIf       <- \"if\" * S\n  tokElif     <- \"elif\" * S\n  tokElse     <- \"else\" * S\n  tokWhile    <- \"while\" * S\n  tokObject   <- \"object\" * S\n  \n  keyWords    <- \"var\" | \"let\" | \"if\" | \"elif\" | \"else\" | \"while\" | \"object\"\n\n  # Atoms\n\n  tokNumber   <- >+Digit * S:\n    ps.add Node(kind: nkNumber, numberVal: parseFloat($1))\n\n  tokType     <- Alpha * *Alnum * S\n  \n  tokBool     <- >(\"true\" | \"false\") * S:\n    ps.add Node(kind: nkBool, boolval: $1 == \"true\")\n\n  tokIdent    <- >((Alpha * *Alnum) - keyWords) * S:\n    ps.add Node(kind: nkIdent, ident: $1)\n\n  # Block\n\n  blockOpen   <- tokCurOpen:\n    ps.add Node(kind: nkBlock)\n\n  blockStmt   <- stmt:\n    ps.addToParent ps.pop()\n\n  blockSec    <- blockOpen * *blockStmt * tokCurClose\n\n  # Var section\n\n  varOpen     <- (tokVar | tokLet):\n    ps.add Node(kind: nkVar)\n  \n  varDef      <- tokIdent * ?(tokColon * tokType) * ?(tokEquals * exprSec):\n    ps.swap()\n    ps.addToParent Node(kind: nkVar,\n                        children: @[Node(kind: nkIdent, ident: \"=\"), ps.pop(), ps.pop()])\n\n  varSec      <- varOpen * +varDef * *(tokComma * varDef):\n    ps.add ps.pop()\n\n  # While statement\n\n  whileSec    <- tokWhile * exprSec * blockSec:\n    ps.swap()\n    ps.add Node(kind: nkWhile, children: @[ps.pop(), ps.pop()])\n\n  # If expressions\n\n  ifOpen      <- tokIf * exprSec * blockSec:\n    let (nBlock, nExpr) = (ps.pop(), ps.pop())\n    ps.add Node(kind: nkIf, children: @[nExpr, nBlock])\n\n  ifElif      <- (tokElif * exprSec * blockSec):\n    ps.swap()\n    ps.addtoParent ps.pop(), 
ps.pop()\n\n  ifElse      <- ?(tokElse * blockSec):\n    ps.addToParent ps.pop()\n\n  ifExpr      <- ifOpen * *ifElif * ?ifElse\n\n  # Object\n\n  objectSec   <- tokObject * tokIdent * tokCurOpen * objFields * tokCurClose\n\n  objFields   <- tokIdent * *(tokComma * tokIdent) * tokColon * tokType\n\n  stmt        <- blockSec | varSec | objectSec | whileSec | exprSec\n\n  rod         <- S * +stmt * !1\n\n  # Expressions: Pratt parser\n\n  exprSec <- exp\n\n  exp <- S * prefix * *infix\n\n  prefix <- ifExpr | tokBool | tokNumber | parenExp | uniMinus | tokIdent\n  uniMinus <- >'-' * exp\n  parenExp <- ( tokParOpen * exp * tokParClose ) ^ 0\n\n  infix <- >(\"not\" | \"->\" | \"$\")                     * exp ^ 1 |\n           >(\"=\")                                    * exp ^ 2 |\n           >(\"or\" | \"xor\")                           * exp ^ 3 |\n           >(\"and\")                                  * exp ^ 4 |\n           >(\"==\" | \"<=\" | \"<\" | \">=\" | \">\" | \"!=\" | \n             \"in\" | \"notin\" | \"is\" | \"isnot\" | \"of\") * exp ^ 5 |\n           >(\"..\" | \"..<\")                           * exp ^ 6 |\n           >(\"&\")                                    * exp ^ 7 |\n           >(\"+\" | \"-\")                              * exp ^ 8 |\n           >(\"*\" | \"/\" | \"%\")                        * exp ^ 9 |\n           >(\"div\" | \"mod\" | \"shl\" | \"shr\")          * exp ^ 10 |\n           >(\"^\")                                    * exp ^^ 11:\n\n    let (f2, f1) = (ps.pop(), ps.pop())\n    ps.add Node(kind: nkInfix, children:\n                @[Node(kind: nkIdent, ident: $1), f1, f2])\n\n\nproc compile(source:string) =\n  var ps: ParseStack\n  echo \"---------------\"\n  echo source\n  if p.match(source, ps).ok:\n    echo \"---------------\"\n    let n = Node(kind: nkBlock, children: ps)\n    echo n\n\nwhen false:\n  compile \"\"\"\n    if a > 3 {\n      var w = 42\n    }\n  \"\"\"\n\nwhen false:\n    compile(\"\"\"\n    var\n      a = 2 + 
2,\n      b = 2 + a\n    \"\"\")\n  \nwhen true:\n    compile(\"\"\"\n    { var a = 10\n      { var a = a } }\n    { var a = 12\n      a = a + 3 }\n    \"\"\")\n  \nwhen false:\n    compile(\"\"\"\n    let x = true\n    if x {\n      var x = 2\n    } \n    \"\"\")\n\nwhen false:\n    compile(\"\"\"\n    let x = true\n    if x {\n      var x = 2\n    } elif false {\n      var y = 3\n    } elif false {\n      var z = 4\n    } else {\n      var w = 5\n    }\n    \"\"\")\n\nwhen false:\n    compile(\"\"\"\n    let x = if true { 2 }\n            else { 4 }\n    \"\"\")\n \nwhen false:\n    compile(\"\"\"\n    let x = true\n    while x {\n      let y = 1\n    }\n    \"\"\")\n\nwhen false:\n    compile(\"\"\"\n    while true {\n      let y = 1\n    }\n    \"\"\")\n\nwhen false:\n    compile(\"\"\"\n    while false {\n      let y = 1\n    }\n    \"\"\")\n\nwhen false:\n    compile(\"\"\"\n    var\n      x = 0,\n      stop = false\n    while x {\n    }\n    \"\"\")\n"
  },
  {
    "path": "npeg.nimble",
    "content": "# Package\n\nversion       = \"1.3.0\"\nauthor        = \"Ico Doornekamp\"\ndescription   = \"a PEG library\"\nlicense       = \"MIT\"\nsrcDir        = \"src\"\ninstallExt    = @[\"nim\"]\n\n# Dependencies\n\nrequires \"nim >= 0.19.0\"\n\n# Test\n\ntask test, \"Runs the test suite\":\n  exec \"nimble testc && nimble testcpp && nimble testarc && nimble testjs\"\n\ntask testc, \"C tests\":\n  exec \"nim c -r tests/tests.nim\"\n\ntask testcpp, \"CPP tests\":\n  exec \"nim cpp -r tests/tests.nim\"\n\ntask testjs, \"JS tests\":\n  exec \"nim js -r tests/tests.nim\"\n\ntask testdanger, \"Runs the test suite in danger mode\":\n  exec \"nim c -d:danger -r tests/tests.nim\"\n\ntask testwin, \"Mingw tests\":\n  exec \"nim c -d:mingw tests/tests.nim && wine tests/tests.exe\"\n\ntask test32, \"32 bit tests\":\n  exec \"nim c --cpu:i386 --passC:-m32 --passL:-m32 tests/tests.nim && tests/tests\"\n\ntask testall, \"Test all\":\n  exec \"nimble test && nimble testcpp && nimble testdanger && nimble testjs && nimble testwin\"\n\nwhen (NimMajor, NimMinor) >= (1, 1):\n  task testarc, \"--gc:arc tests\":\n    exec \"nim c --gc:arc -r tests/tests.nim\"\nelse:\n  task testarc, \"--gc:arc tests\":\n    exec \"true\"\n\ntask perf, \"Test performance\":\n  exec \"nim cpp -r -d:danger tests/performance.nim\"\n"
  },
  {
    "path": "src/npeg/capture.nim",
    "content": "\nimport strutils\nimport sequtils\nimport npeg/[stack,common]\n\ntype\n\n  Capture*[S] = object\n    ck: CapKind\n    si*: int\n    name: string\n    len: int\n    when S is char:\n      s*: string\n    else:\n      s*: S\n\n  Captures*[S] = object\n    capList*: seq[Capture[S]]\n\n  FixMethod* = enum\n    FixAll, FixOpen\n\n# Search the capStack for cftOpen matching the cftClose on top\n\nproc findTop[S](capStack: var Stack[CapFrame[S]], fm: FixMethod): int =\n  if fm == FixOpen:\n    var i = capStack.top - 1\n    var depth = 0\n    while true:\n      if capStack[i].cft == cftClose: inc depth else: dec depth\n      if depth == 0: break\n      dec i\n    result = i\n\n# Convert all closed CapFrames on the capture stack to a list of Captures, all\n# consumed frames are removed from the CapStack\n\nproc fixCaptures*[S](s: openArray[S], capStack: var Stack[CapFrame[S]], fm: FixMethod): Captures[S] =\n\n  assert capStack.top > 0\n  assert capStack.peek.cft == cftClose\n  when npegDebug: echo $capStack\n\n  # Convert the closed frames to a seq[Capture]\n\n  var stack = initStack[int](\"captures\", 8)\n  let iFrom = findTop(capStack, fm)\n\n  for i in iFrom..<capStack.top:\n    let c = capStack[i]\n    if c.cft == cftOpen:\n      stack.push result.capList.len\n      result.capList.add Capture[S](ck: c.ck, si: c.si, name: c.name)\n    else:\n      let i2 = stack.pop()\n      assert result[i2].ck == c.ck\n      result.capList[i2].s = if c.ck == ckPushed:\n        c.sPushed\n      else:\n        s.slice(result[i2].si, c.si)\n      result.capList[i2].len = result.capList.len - i2 - 1\n  assert stack.top == 0\n\n  # Remove closed captures from the cap stack\n\n  capStack.top = iFrom\n\n\nproc collectCaptures*[S](caps: Captures[S]): Captures[S] =\n  result = Captures[S](\n    capList: caps.capList.filterIt(it.ck in {ckVal, ckPushed, ckCodeBlock})\n  )\n\nproc collectCapturesRef*(caps: Captures): Ref =\n  for cap in caps.capList:\n    result.key = cap.name\n    
result.val = cap.s\n\n# The `Captures[S]` type is a seq wrapped in an object to allow boundary\n# checking on acesses with nicer error messages. The procs below allow easy\n# access to the captures from Nim code.\n\nproc getCapture[S](cs: Captures[S], i: int): Capture[S] =\n  if i >= cs.capList.len:\n    let msg = \"Capture out of range, \" & $i & \" is not in [0..\" & $cs.capList.high & \"]\"\n    raise newException(NPegCaptureOutOfRangeError, msg)\n  cs.capList[i]\n\nproc `[]`*[S](cs: Captures[S], i: int): Capture[S] =\n  cs.getCapture(i)\n\nproc `[]`*[S](cs: Captures[S], i: BackwardsIndex): Capture[S] =\n  cs.getCapture(cs.capList.len-i.int)\n\nproc `[]`*[S](cs: Captures[S], range: HSlice[system.int, system.int]): seq[Capture[S]] =\n  for i in range:\n    result.add cs.getCapture(i)\n\niterator items*[S](captures: Captures[S]): Capture[S] =\n  for c in captures.capList:\n    yield c\n\nproc len*[S](captures: Captures[S]): int =\n  captures.capList.len\n\n"
  },
  {
    "path": "src/npeg/codegen.nim",
    "content": "\nimport macros except quote, stamp\nimport strutils\nimport tables\nimport npeg/[common,patt,stack,capture]\n\ntype\n\n  RetFrame = int\n\n  BackFrame = object\n    ip*: int # Instruction pointer\n    si*: int # Subject index\n    rp*: int # Retstack top pointer\n    cp*: int # Capstack top pointer\n    pp*: int # PrecStack top pointer\n\n  PrecFrame = int\n\n  MatchResult*[S] = object\n    ok*: bool\n    matchLen*: int\n    matchMax*: int\n    cs*: Captures[S]\n\n  MatchState*[S] = object\n    ip*: int\n    si*: int\n    simax*: int\n    refs*: Table[string, string]\n    retStack*: Stack[RetFrame]\n    capStack*: Stack[CapFrame[S]]\n    backStack*: Stack[BackFrame]\n    precStack*: Stack[PrecFrame]\n\n  Parser*[S, T] = object\n    fn_init*: proc(): MatchState[S]\n    when npegGcsafe:\n      fn_run*: proc(ms: var MatchState[S], s: openArray[S], u: var T): MatchResult[S] {.gcsafe.}\n    else:\n      fn_run*: proc(ms: var MatchState[S], s: openArray[S], u: var T): MatchResult[S]\n\nwhen declared(macros.stamp): # nimskull\n  template quote(body: untyped): NimNode =\n    macros.stamp(body)\nelse:\n  template quote(body: untyped): NimNode =\n    macros.quote(body)\n\n# This macro translates `$1`.. into `capture[1].s`.. and `@1` into `capture[1].si` \n# for use in code block captures. 
The source nimnode lineinfo is recursively\n# copied to the newly genreated node to make sure \"Capture out of range\"\n# exceptions are properly traced.\n\nproc doSugar(n, captureId: NimNode): NimNode =\n  proc cli(n2: NimNode) =\n    n2.copyLineInfo(n)\n    for nc in n2: cli(nc)\n  let isIntPrefix =  n.kind == nnkPrefix and n[0].kind == nnkIdent and n[1].kind == nnkIntLit\n  if isIntPrefix and n[0].eqIdent(\"$\"):\n    result = newDotExpr(nnkBracketExpr.newTree(captureId, n[1]), ident(\"s\"))\n    cli result\n  elif isIntPrefix and n[0].eqIdent(\"@\"):\n    result = newDotExpr(nnkBracketExpr.newTree(captureId, n[1]), ident(\"si\"))\n    cli result\n  else:\n    result = copyNimNode(n)\n    for nc in n:\n      result.add doSugar(nc, captureId)\n\n\n# Generate the parser main loop. The .computedGoto. pragma will generate code\n# using C computed gotos, which will get highly optmized, mostly eliminating\n# the inner parser loop. Nim limits computed goto to a maximum of 10_000\n# cases; if our program is this large, emit a warning and do not use a\n# computed goto\n\nproc genLoopCode(program: Program, casesCode: NimNode): NimNode=\n  result = nnkWhileStmt.newTree(true.newLit, nnkStmtList.newTree())\n  if program.patt.len < 10_000:\n    result[1].add nnkPragma.newTree(\"computedGoto\".ident)\n  else:\n    warning \"Grammar too large for computed goto, falling back to normal 'case'\"\n  result[1].add casesCode\n  \n\n# Generate out all the case handlers for the parser program\n\nproc genCasesCode*(program: Program, sType, uType, uId: NimNode, ms, s, si, simax, ip: NimNode): NimNode =\n\n  result = quote:\n    case `ip`\n\n  for ipNow, i in program.patt.pairs:\n\n    let\n      ipNext = ipNow + 1\n      opName = newLit(repeat(\" \", i.indent) & ($i.op).toLowerAscii[2..^1])\n      iname = newLit(i.name)\n      ipFail = if i.failOffset == 0:\n        program.patt.high\n      else:\n        ipNow + i.failOffset\n\n    var call = case i.op:\n\n      of opChr:\n        let 
ch = newLit(i.ch)\n        quote:\n          trace `ms`, `iname`, `opName`, `s`, \"\\\"\" & escapeChar(`ch`) & \"\\\"\"\n          if `si` < `s`.len and `s`[`si`] == `ch`.char:\n            inc `si`\n            `ip` = `ipNext`\n          else:\n            `ip` = `ipFail`\n\n      of opLit:\n        let lit = i.lit\n        quote:\n          trace `ms`, `iname`, `opName`, `s`, `lit`.repr\n          if `si` < `s`.len and `s`[`si`] == `lit`:\n            inc `si`\n            `ip` = `ipNext`\n          else:\n            `ip` = `ipFail`\n\n      of opSet:\n        let cs = newLit(i.cs)\n        quote:\n          trace `ms`, `iname`, `opName`, `s`, dumpSet(`cs`)\n          if `si` < `s`.len and `s`[`si`] in `cs`:\n            inc `si`\n            `ip` = `ipNext`\n          else:\n            `ip` = `ipFail`\n\n      of opSpan:\n        let cs = newLit(i.cs)\n        quote:\n          trace `ms`, `iname`, `opName`, `s`, dumpSet(`cs`)\n          while `si` < `s`.len and `s`[`si`] in `cs`:\n            inc `si`\n          `ip` = `ipNext`\n\n      of opChoice:\n        let ip2 = newLit(ipNow + i.ipOffset)\n        let siOffset = newLit(i.siOffset)\n        quote:\n          trace `ms`, `iname`, `opName`, `s`, $`ip2`\n          push(`ms`.backStack, BackFrame(ip:`ip2`, si:`si`+`siOffset`, rp:`ms`.retStack.top, cp:`ms`.capStack.top, pp:`ms`.precStack.top))\n          `ip` = `ipNext`\n\n      of opCommit:\n        let ip2 = newLit(ipNow + i.ipOffset)\n        quote:\n          trace `ms`, `iname`, `opName`, `s`, $`ip2`\n          discard pop(`ms`.backStack)\n          `ip` = `ip2`\n\n      of opCall:\n        let label = newLit(i.callLabel)\n        let ip2 = newLit(ipNow + i.callOffset)\n        quote:\n          trace `ms`, `iname`, `opName`, `s`, `label` & \":\" & $`ip2`\n          push(`ms`.retStack, `ipNext`)\n          `ip` = `ip2`\n\n      of opJump:\n        let label = newLit(i.callLabel)\n        let ip2 = newLit(ipNow + i.callOffset)\n        quote:\n          
trace `ms`, `iname`, `opName`, `s`, `label` & \":\" & $`ip2`\n          `ip` = `ip2`\n\n      of opCapOpen:\n        let capKind = newLit(i.capKind)\n        let capName = newLit(i.capName)\n        let capSiOffset = newLit(i.capSiOffset)\n        quote:\n          trace `ms`, `iname`, `opName`, `s`, $`capKind` & \" -> \" & $`si`\n          push(`ms`.capStack, CapFrame[`sType`](cft: cftOpen, si: `si`+`capSiOffset`, ck: `capKind`, name: `capName`))\n          `ip` = `ipNext`\n\n      of opCapClose:\n        let ck = newLit(i.capKind)\n\n        case i.capKind:\n          of ckCodeBlock:\n            let captureId = ident \"capture\"\n            let code = doSugar(i.capAction, captureId)\n            quote:\n              trace `ms`, `iname`, `opName`, `s`, \"ckCodeBlock -> \" & $`si`\n              push(`ms`.capStack, CapFrame[`sType`](cft: cftClose, si: `si`, ck: `ck`))\n              let capture = collectCaptures(fixCaptures[`sType`](`s`, `ms`.capStack, FixOpen))\n              proc fn(`captureId`: Captures[`sType`], `ms`: var MatchState[`sType`], `uId`: var `uType`): bool =\n                result = true\n                `code`\n              if fn(capture, `ms`, `uId`):\n                `ip` = `ipNext`\n              else:\n                `ip` = `ipFail`\n\n          of ckRef:\n            quote:\n              trace `ms`, `iname`, `opName`, `s`, \"ckRef -> \" & $`si`\n              push(`ms`.capStack, CapFrame[`sType`](cft: cftClose, si: `si`, ck: `ck`))\n              let r = collectCapturesRef(fixCaptures[`sType`](`s`, `ms`.capStack, FixOpen))\n              `ms`.refs[r.key] = r.val\n              `ip` = `ipNext`\n\n          else:\n            quote:\n              trace `ms`, `iname`, `opName`, `s`, $`ck` & \" -> \" & $`si`\n              push(`ms`.capStack, CapFrame[`sType`](cft: cftClose, si: `si`, ck: `ck`))\n              `ip` = `ipNext`\n\n      of opBackref:\n        let refName = newLit(i.refName)\n        quote:\n          if `refName` in 
`ms`.refs:\n            let s2 = `ms`.refs[`refName`]\n            trace `ms`, `iname`, `opName`, `s`, `refName` & \":\\\"\" & s2 & \"\\\"\"\n            if subStrCmp(`s`, `s`.len, `si`, s2):\n              inc `si`, s2.len\n              `ip` = `ipNext`\n            else:\n              `ip` = `ipFail`\n          else:\n            raise newException(NPegUnknownBackrefError, \"Unknown back reference '\" & `refName` & \"'\")\n\n      of opErr:\n        let msg = newLit(i.msg)\n        quote:\n          trace `ms`, `iname`, `opName`, `s`, `msg`\n          var e = newException(NPegParseError, `msg`)\n          `simax` = max(`simax`, `si`)\n          raise e\n\n      of opReturn:\n        quote:\n          trace `ms`, `iname`, `opName`, `s`\n          if `ms`.retStack.top > 0:\n            `ip` = pop(`ms`.retStack)\n          else:\n            result.ok = true\n            `simax` = max(`simax`, `si`)\n            break\n\n      of opAny:\n        quote:\n          trace `ms`, `iname`, `opName`, `s`\n          if `si` < `s`.len:\n            inc `si`\n            `ip` = `ipNext`\n          else:\n            `ip` = `ipFail`\n\n      of opNop:\n        quote:\n          trace `ms`, `iname`, `opName`, `s`\n          `ip` = `ipNext`\n\n      of opPrecPush:\n        if i.prec == 0:\n          quote:\n            push(`ms`.precStack, 0)\n            `ip` = `ipNext`\n        else:\n          let (iPrec, iAssoc) = (i.prec.newLit, i.assoc.newLit)\n          let exp = if i.assoc == assocLeft:\n            quote: peek(`ms`.precStack) < `iPrec`\n          else:\n            quote: peek(`ms`.precStack) <= `iPrec`\n          quote:\n            if `exp`:\n              push(`ms`.precStack, `iPrec`)\n              `ip` = `ipNext`\n            else:\n              `ip` = `ipFail`\n\n      of opPrecPop:\n        quote:\n            discard `ms`.precStack.pop()\n            `ip` = `ipNext`\n\n      of opFail:\n        quote:\n          `simax` = max(`simax`, `si`)\n          if 
`ms`.backStack.top > 0:\n            trace `ms`, \"\", \"opFail\", `s`, \"(backtrack)\"\n            let t = pop(`ms`.backStack)\n            (`ip`, `si`, `ms`.retStack.top, `ms`.capStack.top, `ms`.precStack.top) = (t.ip, t.si, t.rp, t.cp, t.pp)\n          else:\n            trace `ms`, \"\", \"opFail\", `s`, \"(error)\"\n            break\n\n    # Recursively copy the line info from the original instruction NimNode into\n    # the generated Nim code\n    proc aux(n: NimNode) =\n      n.copyLineInfo(i.nimNode)\n      for nc in n: aux(nc)\n    aux(call)\n\n    result.add nnkOfBranch.newTree(newLit(ipNow), call)\n\n\n# Generate code for tracing the parser. An empty stub is generated if tracing\n# is disabled\n\nproc genTraceCode*(program: Program, sType, uType, uId, ms, s, si, simax, ip: NimNode): NimNode =\n  \n  when npegTrace:\n    result = quote:\n      proc doTrace[sType](`ms`: var MatchState, iname, opname: string, ip: int, s: openArray[sType], si: int, ms: var MatchState, msg: string) {.nimcall.} =\n          echo align(if ip >= 0: $ip else: \"\", 3) &\n            \"|\" & align($(peek(ms.precStack)), 3) &\n            \"|\" & align($si, 3) &\n            \"|\" & alignLeft(dumpSubject(s, si, 24), 24) &\n            \"|\" & alignLeft(iname, 15) &\n            \"|\" & alignLeft(opname & \" \" & msg, 40) &\n            \"|\" & repeat(\"*\", ms.backStack.top)\n\n      template trace(`ms`: var MatchState, iname, opname: string, `s`: openArray[`sType`], msg = \"\") =\n        doTrace(`ms`, iname, opname, `ip`, `s`, `si`, `ms`, msg)\n\n  else:\n    result = quote:\n      template trace(`ms`: var MatchState, iname, opname: string, `s`: openArray[`sType`], msg = \"\") =\n        discard\n\n\n# Augment exception stack traces with the NPeg return stack and re-raise\n\nproc genExceptionCode(ms, ip, si, simax, symTab: NimNode): NimNode =\n  quote:\n\n    # Helper proc to add a stack frame for the given ip\n    var trace: seq[StackTraceEntry]\n    let symTab = `symTab`\n    
proc aux(ip: int) =\n      let sym = symTab[ip]\n      trace.insert StackTraceEntry(procname: cstring(sym.repr), filename: cstring(sym.lineInfo.filename), line: sym.lineInfo.line)\n      # On older Nim versions e.trace is not accessible, in this case just\n      # dump the exception to stdout if npgStacktrace is enabled\n      when npegStacktrace:\n        echo $(sym.lineInfo) & \": \" & sym.repr\n\n    # Emit current IP and unwind all addresses from the return stack\n    aux(`ip`)\n    while `ms`.retStack.top > 0:\n      aux(`ms`.retStack.pop())\n\n    let e = getCurrentException()\n\n    when compiles(e.trace.pop()):\n      # drop the generated parser fn() from the trace and replace by the NPeg frames\n      discard e.trace.pop()\n      e.trace.add trace\n\n    # Re-reaise the exception with the augmented stack trace and match index filled in\n    if e of NPegException:\n      let eref = (ref NPegException)(e)\n      eref.matchLen = `si`\n      eref.matchMax = `simax`\n    raise\n\n\n# Convert the list of parser instructions into a Nim finite state machine\n#\n# - sType is the base type of the subject; typically `char` but can be specified\n#   to be another type by the user\n# - uType is the type of the userdata, if not used this defaults to `bool`\n# - uId is the identifier of the userdata, if not used this defaults to `userdata`\n\nproc genCode*(program: Program, sType, uType, uId: NimNode): NimNode =\n\n  let\n    count = program.patt.high\n    suffix = \"_NP\"\n    ms = ident \"ms\" & suffix\n    s = ident \"s\" & suffix\n    si = ident \"si\" & suffix\n    ip = ident \"ip\" & suffix\n    simax = ident \"simax\" & suffix\n\n    casesCode = genCasesCode(program, sType, uType, uId, ms, s, si, simax, ip)\n    loopCode = genLoopCode(program, casesCode)\n    traceCode = genTraceCode(program, sType, uType, uId, ms, s, si, simax, ip)\n    exceptionCode = genExceptionCode(ms, ip, si, simax, newLit(program.symTab))\n\n  result = quote:\n\n    proc fn_init(): 
MatchState[`sType`] {.gensym.} =\n      result = MatchState[`sType`](\n        retStack: initStack[RetFrame](\"return\", 8, npegRetStackSize),\n        capStack: initStack[CapFrame[`sType`]](\"capture\", 8),\n        backStack: initStack[BackFrame](\"backtrace\", 8, npegBackStackSize),\n        precStack: initStack[PrecFrame](\"precedence\", 8, 16),\n      )\n      push(result.precStack, 0)\n\n\n    proc fn_run(`ms`: var MatchState[`sType`], `s`: openArray[`sType`], `uId`: var `uType`): MatchResult[`sType`] {.gensym.} =\n\n      # Create local instances of performance-critical MatchState vars, this\n      # saves a dereference on each access\n\n      var\n        `ip`: range[0..`count`] = `ms`.ip\n        `si` = `ms`.si\n        `simax` = `ms`.simax\n\n      # These templates are available for code blocks\n\n      template validate(o: bool) {.used.} =\n        if not o: return false\n\n      template fail() {.used.} =\n        return false\n\n      template push(`s`: string|`sType`) {.used.} =\n        push(`ms`.capStack, CapFrame[`sType`](cft: cftOpen, ck: ckPushed))\n        push(`ms`.capStack, CapFrame[`sType`](cft: cftClose, ck: ckPushed, sPushed: `s`))\n\n      # Emit trace and loop code\n\n      try:\n        `traceCode`\n        `loopCode`\n      except CatchableError:\n        `exceptionCode`\n\n      # When the parsing machine is done, copy the local copies of the\n      # matchstate back, close the capture stack and collect all the captures\n      # in the match result\n\n      `ms`.ip = `ip`\n      `ms`.si = `si`\n      `ms`.simax = `simax`\n      result.matchLen = `ms`.si\n      result.matchMax = `ms`.simax\n      if result.ok and `ms`.capStack.top > 0:\n        result.cs = fixCaptures(`s`, `ms`.capStack, FixAll)\n\n    # This is the result of genCode: a Parser object with two function\n    # pointers: fn_init: initializes a MatchState object for this parser\n    # fn_run: performs the parsing of the subject on the given matchstate\n\n    
Parser[`sType`,`uType`](fn_init: fn_init, fn_run: fn_run)\n\n  when npegGcsafe:\n    result[0].addPragma(ident(\"gcsafe\"))\n\n  when npegExpand:\n    echo repr result\n\n"
  },
  {
    "path": "src/npeg/common.nim",
    "content": "\nimport strutils\nimport tables\nimport macros\nimport bitops\n\n\nconst\n\n  # Some constants with \"sane\" defaults, configurable with compiler flags\n\n  npegPattMaxLen* {.intdefine.} = 4096\n  npegInlineMaxLen* {.intdefine.} = 30\n  npegRetStackSize* {.intdefine.} = 1024\n  npegBackStackSize* {.intdefine.} = 1024\n  npegOptimize* {.intdefine.} = 255\n  npegDebug* = defined(npegDebug)\n  npegTrace* = defined(npegTrace)\n  npegExpand* = defined(npegExpand)\n  npegGraph* = defined(npegGraph)\n  npegGcsafe* = defined(npegGcsafe)\n  npegStacktrace* = defined(npegStacktrace)\n\n  # Various optimizations. These can be disabled for testing purposes\n  # or when suspecting bugs in the optimization stages\n\n  npegOptSets* = npegOptimize.testBit(0)\n  npegOptHeadFail* = npegOptimize.testBit(1)\n  npegOptCapShift* = npegOptimize.testBit(2)\n  npegOptChoiceCommit* = npegOptimize.testBit(3)\n\ntype\n\n  NPegException* = object of CatchableError\n    matchLen*: int\n    matchMax*: int\n\n  NPegParseError* = object of NPegException\n  NPegStackOverflowError* = object of NPegException\n  NPegUnknownBackrefError* = object of NPegException\n  NPegCaptureOutOfRangeError* = object of NPegException\n\n  CapFrameType* = enum cftOpen, cftClose\n\n  CapKind* = enum\n    ckVal,          # Value capture\n    ckPushed,       # Pushed capture\n    ckCodeBlock,    # Code block capture\n    ckRef           # Reference\n\n  CapFrame*[S] = object\n    cft*: CapFrameType # Capture frame type\n    name*: string      # Capture name\n    si*: int           # Subject index\n    ck*: CapKind       # Capture kind\n    when S is char:\n      sPushed*: string # Pushed capture, overrides subject slice\n    else:\n      sPushed*: S      # Pushed capture, overrides subject slice\n\n  Ref* = object\n    key*: string\n    val*: string\n\n  Opcode* = enum\n    opChr,          # Matching: Character\n    opLit,          # Matching: Literal\n    opSet,          # Matching: Character set and/or 
range\n    opAny,          # Matching: Any character\n    opNop,          # Matching: Always matches, consumes nothing\n    opSpan          # Matching: Match a sequence of 0 or more character sets\n    opChoice,       # Flow control: stores current position\n    opCommit,       # Flow control: commit previous choice\n    opCall,         # Flow control: call another rule\n    opJump,         # Flow control: jump to target\n    opReturn,       # Flow control: return from earlier call\n    opFail,         # Fail: unwind stack until last frame\n    opCapOpen,      # Capture open\n    opCapClose,     # Capture close\n    opBackref       # Back reference\n    opErr,          # Error handler\n    opPrecPush,     # Precedence stack push\n    opPrecPop,      # Precedence stack pop\n\n  CharSet* = set[char]\n\n  Assoc* = enum assocLeft, assocRight\n\n  Inst* = object\n    case op*: Opcode\n      of opChoice, opCommit:\n        ipOffset*: int\n        siOffset*: int\n      of opChr:\n        ch*: char\n      of opLit:\n        lit*: NimNode\n      of opCall, opJump:\n        callLabel*: string\n        callOffset*: int\n      of opSet, opSpan:\n        cs*: CharSet\n      of opCapOpen, opCapClose:\n        capKind*: CapKind\n        capAction*: NimNode\n        capName*: string\n        capSiOffset*: int\n      of opErr:\n        msg*: string\n      of opFail, opReturn, opAny, opNop, opPrecPop:\n        discard\n      of opBackref:\n        refName*: string\n      of opPrecPush:\n        prec*: int\n        assoc*: Assoc\n    failOffset*: int\n    # Debug info\n    name*: string\n    nimNode*: NimNode\n    indent*: int\n\n  Patt* = seq[Inst]\n\n  Symbol* = object\n    ip*: int\n    name*: string\n    repr*: string\n    lineInfo*: LineInfo\n\n  SymTab* = object\n    syms*: seq[Symbol]\n\n  Rule* = object\n    name*: string\n    patt*: Patt\n    repr*: string\n    lineInfo*: LineInfo\n\n  Program* = object\n    patt*: Patt\n    symTab*: SymTab\n\n  Template* = ref object\n    
name*: string\n    args*: seq[string]\n    code*: NimNode\n\n  Grammar* = ref object\n    rules*: Table[string, Rule]\n    templates*: Table[string, Template]\n\n#\n# SymTab implementation\n#\n\nproc add*(s: var SymTab, ip: int, name: string, repr: string = \"\", lineInfo: LineInfo = LineInfo()) =\n  let symbol = Symbol(ip: ip, name: name, repr: repr, lineInfo: lineInfo)\n  s.syms.add(symbol)\n\nproc `[]`*(s: SymTab, ip: int): Symbol =\n  for sym in s.syms:\n    if ip >= sym.ip:\n      result = sym\n\nproc `[]`*(s: SymTab, name: string): Symbol =\n  for sym in s.syms:\n    if name == sym.name:\n      return sym\n\nproc contains*(s: SymTab, ip: int): bool =\n  for sym in s.syms:\n    if ip == sym.ip:\n      return true\n\nproc contains*(s: SymTab, name: string): bool =\n  for sym in s.syms:\n    if name == sym.name:\n      return true\n\n#\n# Some glue to report parse errors without having to pass the original\n# NimNode all the way down the call stack\n#\n\nvar gCurErrorNode {.compileTime} = newEmptyNode()\n\nproc setKrakNode*(n: NimNode) =\n  gCurErrorNode.copyLineInfo(n)\n\ntemplate krak*(n: NimNode, msg: string) =\n  error \"NPeg: error at '\" & n.repr & \"': \" & msg & \"\\n\", n\n\ntemplate krak*(msg: string) =\n  krak gCurErrorNode, msg\n\n\n#\n# Misc helper functions\n#\n\nproc subStrCmp*(s: openArray[char], slen: int, si: int, s2: string): bool =\n  if si > slen - s2.len:\n    return false\n  for i in 0..<s2.len:\n    if s[si+i] != s2[i]:\n      return false\n  return true\n\n\nproc subIStrCmp*(s: openArray[char], slen: int, si: int, s2: string): bool =\n  if si > slen - s2.len:\n    return false\n  for i in 0..<s2.len:\n    if s[si+i].toLowerAscii != s2[i].toLowerAscii:\n      return false\n  return true\n\n\nproc truncate*(s: string, len: int): string =\n  result = s\n  if result.len > len:\n    result = result[0..len-1] & \"...\"\n\n# This macro flattens AST trees of `|` operators into a single call to\n# `choice()` with all arguments in one call. 
e.g, it will convert `A | B | C`\n# into `call(A, B, C)`.\n\nproc flattenChoice*(n: NimNode, nChoice: NimNode = nil): NimNode =\n  proc addToChoice(n, nc: NimNode) =\n    if n.kind == nnkInfix and n[0].eqIdent(\"|\"):\n      addToChoice(n[1], nc)\n      addToChoice(n[2], nc)\n    else:\n      nc.add flattenChoice(n)\n  if n.kind == nnkInfix and n[0].eqIdent(\"|\"):\n    result = nnkCall.newTree(ident \"choice\")\n    addToChoice(n[1], result)\n    addToChoice(n[2], result)\n  else:\n    result = copyNimNode(n)\n    for nc in n:\n      result.add flattenChoice(nc)\n\n\n# Create a short and friendly text representation of a character set.\n\nproc escapeChar*(c: char): string =\n  const escapes = { '\\n': \"\\\\n\", '\\r': \"\\\\r\", '\\t': \"\\\\t\" }.toTable()\n  if c in escapes:\n    result = escapes[c]\n  elif c >= ' ' and c <= '~':\n    result = $c\n  else:\n    result = \"\\\\x\" & toHex(c.int, 2).toLowerAscii\n\nproc dumpSet*(cs: CharSet): string =\n  result.add \"{\"\n  var c = 0\n  while c <= 255:\n    let first = c\n    while c <= 255 and c.char in cs:\n      inc c\n    if (c - 1 == first):\n      result.add \"'\" & escapeChar(first.char) & \"',\"\n    elif c - 1 > first:\n      result.add \"'\" & escapeChar(first.char) & \"'..'\" & escapeChar((c-1).char) & \"',\"\n    inc c\n  if result[result.len-1] == ',': result.setLen(result.len-1)\n  result.add \"}\"\n\n# Create a friendly version of the given string, escaping not-printables\n# and no longer then `l`\n\nproc dumpSubject*[S](s: openArray[S], o:int=0, l:int=1024): string =\n  var i = o\n  while i < s.len:\n    when S is char:\n      let a = escapeChar s[i]\n    else:\n      mixin repr\n      let a = s[i].repr\n    if result.len >= l-a.len:\n      return\n    result.add a\n    inc i\n\n\nproc `$`*(i: Inst, ip=0): string =\n  var args: string\n  case i.op:\n    of opChr:\n      args = \" '\" & escapeChar(i.ch) & \"'\"\n    of opChoice, opCommit:\n      args = \" \" & $(ip+i.ipOffset)\n    of opCall, 
opJump:\n      args = \" \" & $(ip+i.callOffset)\n    of opCapOpen, opCapClose:\n      args = \" \" & $i.capKind\n      if i.capSiOffset != 0:\n        args &= \"(\" & $i.capSiOffset & \")\"\n    of opBackref:\n      args = \" \" & i.refName\n    of opPrecPush:\n      args = \" @\" & $i.prec\n    else:\n      discard\n  if i.failOffset != 0:\n    args.add \" \" & $(ip+i.failOffset)\n  let tmp = if i.nimNode != nil: i.nimNode.repr.truncate(30) else: \"\"\n  result.add alignLeft(i.name, 15) &\n             alignLeft(repeat(\" \", i.indent) & ($i.op).toLowerAscii[2..^1] & args, 25) & \" \" & tmp\n\nproc `$`*(program: Program): string =\n  for ip, i in program.patt.pairs:\n    if ip in program.symTab:\n      result.add \"\\n\" & program.symTab[ip].repr & \"\\n\"\n    result.add align($ip, 4) & \": \" & `$`(i, ip) & \"\\n\"\n\n\nproc slice*(s: openArray[char], iFrom, iTo: int): string =\n  let len = iTo - iFrom\n  result.setLen(len)\n  for i in 0..<len:\n    result[i] = s[i+iFrom]\n\nproc slice*[S](s: openArray[S], iFrom, iTo: int): S =\n  result = s[iFrom]\n\nproc `$`*(t: Template): string =\n  return t.name & \"(\" & t.args.join(\", \") & \") = \" & t.code.repr\n\n"
  },
  {
    "path": "src/npeg/dot.nim",
    "content": "\nimport tables\nimport strutils\n\ntype\n  Dot* = ref object\n    name: string\n    edges: Table[string, bool]\n    nodes: seq[string]\n\nconst colors = {\n  \"inline\": \"grey60\",\n  \"call\": \"blue\",\n}.toTable()\n\n\nproc escape(s: string): string =\n  return s.replace(\".\", \"_\").replace(\"-\", \"_\")\n\nproc newDot*(name: string): Dot =\n  return Dot(name: name)\n\nproc add*(d: Dot, n1, n2: string, meth: string) =\n  if d != nil:\n    let l = \"  \" & n1.escape & \" -> \" & n2.escape & \" [ color=\" & colors[meth] & \"];\"\n    d.edges[l] = true\n\nproc addPatt*(d: Dot, name: string, len: int) =\n  if d != nil:\n    var color = \"black\"\n    if len > 10: color = \"orange\"\n    if len > 100: color = \"red\"\n    d.nodes.add \"  \" & name.escape &\n                \" [ fillcolor=lightgrey color=\" & color & \" label=\\\"\" & name & \"/\" & $len & \"\\\"];\"\n\nproc dump*(d: Dot) =\n  const npegDotDir {.strdefine.}: string = \"\"\n  when npegDotDir != \"\":\n    let fname = npegDotDir & \"/\" & d.name & \".dot\"\n    echo \"Dumping dot graph file to \" & fname & \"...\"\n\n    var o: string\n    o.add \"digraph dot {\\n\"\n    o.add \"  graph [ center=true, margin=0.2, nodesep=0.1, ranksep=0.3 ];\\n\"\n    o.add \"  node [ shape=box, style=\\\"rounded,filled\\\" width=0, height=0, fontname=Helvetica, fontsize=10];\\n\"\n    o.add \"  edge [ fontname=Helvetica, fontsize=10];\\n\"\n    for k, v in d.edges:\n      o.add k & \"\\n\"\n    for n in d.nodes:\n      o.add n & \"\\n\"\n    o.add \"}\\n\"\n    writeFile fname, o\n\n"
  },
  {
    "path": "src/npeg/grammar.nim",
    "content": "\nimport tables\nimport macros\nimport strutils\nimport npeg/[common,dot]\n\n# This is the global instance of pattern library. This is itself a grammar\n# where all patterns are stored with qualified names in the form of\n# <libname>.<pattname>.  At grammar link time all unresolved patterns are\n# looked up from this global table.\n\nvar gPattLib {.compileTime.} = new Grammar\n\n\n\n# Store a grammar in the library.  The rule names and all unqualified\n# identifiers in the grammar are expanded to qualified names in the form\n# <libname>.<pattname> to make sure they are easily resolved when they are\n# later imported by other grammars.\n\nproc libStore*(libName: string, grammar: Grammar) =\n\n  proc qualify(name: string): string =\n    if libName.len > 0: libName & \".\" & name else: name\n\n  for rulename, rule in grammar.rules:\n    var rulename2 = qualify(rulename)\n    var rule2 = Rule(name: rulename2)\n    for i in rule.patt.items:\n      var i2 = i\n      if i2.op == opCall:\n        if \".\" notin i2.callLabel:\n          i2.callLabel = qualify(i2.callLabel)\n      rule2.patt.add i2\n    gPattLib.rules[rulename2] = rule2\n\n  for tname, t in grammar.templates:\n    gPattLib.templates[qualify(tname)] = t\n\n#\n# Add rule to a grammer\n#\n\nproc addRule*(grammar: Grammar, name: string, patt: Patt, repr: string = \"\", lineInfo: LineInfo = LineInfo()) =\n  if name in grammar.rules:\n    warning \"Redefinition of rule '\" & name & \"'\"\n  var rule = Rule(name: name, patt: patt, repr: repr, lineInfo: lineInfo)\n  for i in rule.patt.mitems:\n    if i.name == \"\":\n      i.name = name\n  grammar.rules[name] = rule\n\n# Try to import the given rule from the pattern library into a grammar. 
Returns\n# true if import succeeded, false if not found.\n\nproc libImportRule*(name: string, grammar: Grammar): bool =\n  if name in gPattLib.rules:\n    grammar.addRule name, gPattLib.rules[name].patt\n    when npegDebug:\n      echo \"importing \", name\n    return true\n\n\nproc libImportTemplate*(name: string): Template =\n  if name in gPattLib.templates:\n    result = gPattLib.templates[name]\n\n\n# Shadow the given name in the grammar by creating an unique new name,\n# and moving the original rule\n\nproc shadow*(grammar: Grammar, name: string): string =\n  var gShadowId {.global.} = 0\n  inc gShadowId\n  let name2 = name & \"-\" & $gShadowId\n  when npegDebug:\n    echo \"  shadow \", name, \" -> \", name2\n  grammar.rules[name2] = grammar.rules[name]\n  grammar.rules.del name\n  return name2\n\n\n# Link a list of patterns into a grammar, which is itself again a valid\n# pattern. Start with the initial rule, add all other non terminals and fixup\n# opCall addresses\n\nproc link*(grammar: Grammar, initial_name: string, dot: Dot = nil): Program =\n\n  if initial_name notin grammar.rules:\n    error \"inital rule '\" & initial_name & \"' not found\"\n\n  var retPatt: Patt\n  var symTab: SymTab\n  var ruleRepr: Table[int, string]\n\n  # Recursively emit a pattern and all patterns it calls which are\n  # not yet emitted\n\n  proc emit(name: string) =\n    if npegDebug:\n      echo \"emit \", name\n    let rule = grammar.rules[name]\n    if rule.patt.len > 0:\n      let ip = retPatt.len\n      symTab.add(ip, name, rule.repr, rule.lineInfo)\n      retPatt.add rule.patt\n      retPatt.add Inst(op: opReturn, name: rule.patt[0].name)\n\n    for i in rule.patt:\n      if i.op == opCall and i.callLabel notin symTab:\n        if i.callLabel notin grammar.rules and not libImportRule(i.callLabel, grammar):\n          error \"Npeg: rule \\\"\" & name & \"\\\" is referencing undefined rule \\\"\" & i.callLabel & \"\\\"\"\n        dot.add(name, i.callLabel, \"call\")\n       
 emit i.callLabel\n\n  emit initial_name\n\n  # Fixup call addresses and do tail call optimization\n\n  for ip, i in retPatt.mpairs:\n    if i.op == opCall:\n      i.callOffset = symTab[i.callLabel].ip - ip\n    if i.op == opCall and retPatt[ip+1].op == opReturn:\n      i.op = opJump\n\n  # Choice/Commit pairs that touch because of head fail optimization can be\n  # replaced by a jump and a nop\n\n  when npegOptChoiceCommit:\n    for i in 0..<retPatt.high:\n      if retPatt[i+0].op == opChoice and retPatt[i+1].op == opCommit:\n        retPatt[i+0] = Inst(op: opJump, callOffset: retPatt[i+1].ipOffset + 1)\n        retPatt[i+1] = Inst(op: opNop)\n\n  # Trailing opFail is used by the codegen\n\n  symTab.add(retPatt.len, \"_fail\")\n  retPatt.add Inst(op: opFail)\n\n  # Calc indent level for instructions\n\n  var indent = 0\n  for ip, i in retPatt.mpairs:\n    if i.op in {opCapClose, opCommit}: dec indent\n    i.indent = indent\n    if i.op in {opCapOpen, opChoice}: inc indent\n\n  result = Program(patt: retPatt, symTab: symTab)\n\n  when npegTrace:\n    echo result\n\n"
  },
  {
    "path": "src/npeg/lib/core.nim",
    "content": "\n#\n# This library file is special: it is imported by default, and provides rules\n# which do not live in a separate namespace.\n#\n\nwhen defined(nimHasUsed): {.used.}\n\nimport npeg\n\ngrammar \"\":\n  Alnum  <- {'A'..'Z','a'..'z','0'..'9'} # Alphanumeric characters\n  Alpha  <- {'A'..'Z','a'..'z'}          # Alphabetic characters\n  Blank  <- {' ','\\t'}                   # Space and tab\n  Cntrl  <- {'\\x00'..'\\x1f','\\x7f'}      # Control characters\n  Digit  <- {'0'..'9'}                   # Digits\n  Graph  <- {'\\x21'..'\\x7e'}             # Visible characters\n  Lower  <- {'a'..'z'}                   # Lowercase characters\n  Print  <- {'\\x21'..'\\x7e',' '}         # Visible characters and spaces\n  Space  <- {'\\9'..'\\13',' '}            # Whitespace characters\n  Upper  <- {'A'..'Z'}                   # Uppercase characters\n  Xdigit <- {'A'..'F','a'..'f','0'..'9'} # Hexadecimal digits\n\n"
  },
  {
    "path": "src/npeg/lib/rfc3339.nim",
    "content": "\n#\n# This library provides a number of common types\n#\n\nimport npeg\n\nwhen defined(nimHasUsed): {.used.}\n\ngrammar \"rfc3339\":\n\n   date_fullyear   <- Digit[4]\n   date_month      <- Digit[2]  # 01-12\n   date_mday       <- Digit[2]  # 01-28, 01-29, 01-30, 01-31 based on\n                                # month/year\n   time_hour       <- Digit[2]  # 00-23\n   time_minute     <- Digit[2]  # 00-59\n   time_second     <- Digit[2]  # 00-58, 00-59, 00-60 based on leap second\n                               # rules\n   time_secfrac    <- \".\" * +Digit\n   time_numoffset  <- (\"+\" | \"-\") * time_hour * \":\" * time_minute\n   time_offset     <- \"Z\" | time_numoffset\n\n   partial_time    <- time_hour * \":\" * time_minute * \":\" * time_second * ?time_secfrac\n   full_date       <- date_fullyear * \"-\" * date_month * \"-\" * date_mday\n   full_time       <- partial_time * time_offset\n\n   date_time       <- full_date * (\"T\" | \" \") * full_time\n"
  },
  {
    "path": "src/npeg/lib/types.nim",
    "content": "\n#\n# This library provides a number of common types\n#\n\nimport npeg\n\nwhen defined(nimHasUsed): {.used.}\n\ntemplate checkRange*(T: typedesc, parseFn: untyped, s: string): bool =\n  let v = parseFn(s).BiggestInt\n  v >= T.low.BiggestInt and v <= T.high.BiggestInt\n\ngrammar \"types\":\n\n  bool    <- \"true\" | \"false\"\n\n  # Unsigned decimal\n\n  uint    <- +Digit\n  uint8   <- >+uint: validate checkRange(uint8,  parseInt, $1)\n  uint16  <- >+uint: validate checkRange(uint16, parseInt, $1)\n  uint32  <- >+uint: validate checkRange(uint32, parseInt, $1)\n\n  # Signed decimal\n\n  int     <- ?'-' * uint\n  int8    <- >int: validate checkRange(int8,   parseInt, $1)\n  int16   <- >int: validate checkRange(int16,  parseInt, $1)\n  int32   <- >int: validate checkRange(int32,  parseInt, $1)\n  int64   <- >int: validate checkRange(int64,  parseInt, $1)\n\n  # Hexadecimal\n\n  hex    <- '0' * {'x','X'} * +Digit\n  hex8   <- >+uhex: validate checkRange(uint8,  parseHexInt, $1)\n  hex16  <- >+uhex: validate checkRange(uint16, parseHexInt, $1)\n  hex32  <- >+uhex: validate checkRange(uint32, parseHexInt, $1)\n\n"
  },
  {
    "path": "src/npeg/lib/uri.nim",
    "content": "import npeg\n\nwhen defined(nimHasUsed): {.used.}\n\n# The grammar below is a literal translation of the ABNF notation of the\n# RFC. Optimizations can be made to limit backtracking, but this is a nice\n# example how to create a parser from a RFC protocol description.\n\ngrammar \"uri\":\n\n  URI <- scheme * \":\" * hier_part * ?( \"?\" * query) * ?( \"#\" * fragment) * !1\n\n  hier_part <- \"//\" * authority * path\n\n  URI_reference <- uri | relative_ref\n\n  absolute_uri <- scheme * \":\" * hier_part * ?( \"?\" * query)\n\n  relative_ref <- relative_part * ?( \"?\" * query) * ?( \"#\" * fragment)\n\n  relative_part <- \"//\" * authority * path_abempty |\n                   path_absolute |\n                   path_noscheme |\n                   path_empty\n\n  scheme <- (Alpha * *( Alpha | Digit | \"+\" | \"-\" | \".\" ))\n\n  authority <- ?(userinfo * \"@\") * host * ?( \":\" * port)\n  userinfo <- *(unreserved | pct_encoded | sub_delims | \":\")\n\n  host <- (IP_literal | IPv4address | reg_name)\n  port <- *Digit\n\n  IP_literal <- \"[\" * (IPv6address | IPvFuture) * \"]\"\n\n  IPvFuture <- \"v\" * +Xdigit * \".\" * +(unreserved | sub_delims | \":\")\n\n  IPv6address <-                                     (h16 * \":\")[6] * ls32 |\n                                              \"::\" * (h16 * \":\")[5] * ls32 |\n               ?( h16                     ) * \"::\" * (h16 * \":\")[4] * ls32 |\n               ?( h16 * (\":\" * h16)[0..1] ) * \"::\" * (h16 * \":\")[3] * ls32 |\n               ?( h16 * (\":\" * h16)[0..2] ) * \"::\" * (h16 * \":\")[2] * ls32 |\n               ?( h16 * (\":\" * h16)[0..3] ) * \"::\" * (h16 * \":\")    * ls32 |\n               ?( h16 * (\":\" * h16)[0..4] ) * \"::\" *                  ls32 |\n               ?( h16 * (\":\" * h16)[0..5] ) * \"::\" *                  h16  |\n               ?( h16 * (\":\" * h16)[0..6] ) * \"::\"\n\n  h16 <- Xdigit[1..4]\n  ls32 <- (h16 * \":\" * h16) | IPv4address\n  IPv4address <- 
dec_octet * \".\" * dec_octet * \".\" * dec_octet * \".\" * dec_octet\n\n  dec_octet <- Digit[1..3]\n\n  reg_name <- *(unreserved | pct_encoded | sub_delims)\n\n  path <- path_abempty  | # begins with \"/\" or is empty\n          path_absolute | # begins with \"/\" but not \"//\"\n          path_noscheme | # begins with a non-colon segment\n          path_rootless | # begins with a segment\n          path_empty      # zero characters\n\n  path_abempty  <- (*( \"/\" * segment ))\n  path_absolute <- (\"/\" * ?( segment_nz * *( \"/\" * segment ) ))\n  path_noscheme <- (segment_nz_nc * *( \"/\" * segment ))\n  path_rootless <- (segment_nz * *( \"/\" * segment ))\n  path_empty    <- 0\n\n  segment       <- *pchar\n  segment_nz    <- +pchar\n  segment_nz_nc <- +( unreserved | pct_encoded | sub_delims | \"@\" )\n                # non_zero_length segment without any colon \":\"\n\n  pchar         <- unreserved | pct_encoded | sub_delims | \":\" | \"@\"\n\n  query         <- *( pchar | \"|\" | \"?\" )\n\n  fragment      <- *( pchar | \"|\" | \"?\" )\n\n  pct_encoded   <- \"%\" * Xdigit * Xdigit\n\n  unreserved    <- Alpha | Digit | \"-\" | \".\" | \"_\" | \"~\"\n  reserved      <- gen_delims | sub_delims\n  gen_delims    <- \":\" | \"|\" | \"?\" | \"#\" | \"[\" | \"]\" | \"@\"\n  sub_delims    <- \"!\" | \"$\" | \"&\" | \"'\" | \"(\" | \")\" | \"*\" | \"+\" | \",\" | \";\" | \"=\"\n\n"
  },
  {
    "path": "src/npeg/lib/utf8.nim",
    "content": "\nimport npeg\n\nwhen defined(nimHasUsed): {.used.}\n\ngrammar \"utf8\":\n\n  cont <- {128..191}\n\n  # Matches any utf-8 codepoint glyph\n\n  any <- {0..127} |\n         {194..223} * cont[1] |\n         {224..239} * cont[2] |\n         {240..244} * cont[3]\n\n  bom <- \"\\xff\\xfe\"\n\n  # Check for UTF-8 character classes. Depends on the tables from\n  # the nim unicode module\n\n  space <- >utf8.any: validate unicode.isSpace($1)\n  lower <- >utf8.any: validate unicode.isLower(runeAt($1, 0))\n  upper <- >utf8.any: validate unicode.isUpper(runeAt($1, 0))\n  alpha <- >utf8.any: validate unicode.isAlpha(runeAt($1, 0))\n  title <- >utf8.any: validate unicode.isTitle(runeAt($1, 0))\n"
  },
  {
    "path": "src/npeg/parsepatt.nim",
    "content": "\nimport tables, macros, sequtils, strutils, algorithm\nimport npeg/[common,patt,dot,grammar]\n\nwhen npegGraph:\n  import npeg/[railroad]\n\n\n# Recursively compile a PEG rule to a Pattern\n\nproc parsePatt*(pattName: string, nn: NimNode, grammar: Grammar, dot: Dot = nil): Patt =\n\n  when npegDebug:\n    echo \"parse \", pattName, \" <- \", nn.repr\n\n  proc aux(n: NimNode): Patt =\n\n    setKrakNode(n)\n\n    proc inlineOrCall(callName: string): Patt =\n\n      # Try to import symbol early so we might be able to inline or shadow it\n      if callName notin grammar.rules:\n        discard libImportRule(callName, grammar)\n\n      if pattName == callName:\n        if pattName in grammar.rules:\n          let nameShadowed = grammar.shadow(pattName)\n          return newCallPatt(nameShadowed)\n\n      if callName in grammar.rules and grammar.rules[callName].patt.len < npegInlineMaxLen:\n        when npegDebug:\n          echo \"  inline \", callName\n        dot.add(pattName, callName, \"inline\")\n        return grammar.rules[callName].patt\n\n      else:\n        when npegDebug:\n          echo \"  call \", callName\n        dot.add(pattName, callName, \"call\")\n        return newCallPatt(callName)\n\n    proc applyTemplate(tName: string, arg: NimNode): NimNode =\n      let t = if tName in grammar.templates:\n        grammar.templates[tName]\n      else:\n        libImportTemplate(tName)\n      if t != nil:\n        if arg.len-1 != t.args.len:\n          krak arg, \"Wrong number of arguments for template \" & tName & \"(\" & $(t.args.join(\",\")) & \")\"\n        proc aux(n: NimNode): NimNode =\n          if n.kind == nnkIdent and n.strVal in t.args:\n            result = arg[ find(t.args, n.strVal)+1 ]\n          else:\n            result = copyNimNode(n)\n            for nc in n:\n              result.add aux(nc)\n        result = aux(t.code).flattenChoice()\n        when npegDebug:\n          echo \"template \", tName, \" = \\n  in:  \", 
n.repr, \"\\n  out: \", result.repr\n\n    case n.kind:\n\n      of nnkPar:\n        if n.len > 1:\n          krak n, \"syntax error. Did you mean '|'?\"\n        result = aux n[0]\n\n      of nnkIntLit:\n        result = newPatt(n.intVal)\n\n      of nnkStrLit:\n        result = newPatt(n.strVal)\n\n      of nnkCharLit:\n        result = newPatt($n.intVal.char)\n\n      of nnkCall:\n        var name: string\n        if n[0].kind == nnkIdent:\n          name = n[0].strVal\n        elif n[0].kind == nnkDotExpr:\n          name = n[0].repr\n        else:\n          krak n, \"syntax error\"\n        let n2 = applyTemplate(name, n)\n        if n2 != nil:\n          result = aux n2\n        elif name == \"choice\":\n          result = choice(n[1..^1].map(aux))\n        elif n.len == 2:\n          case name\n            of \"R\": result = newBackrefPatt(n[1].strVal)\n        elif n.len == 3:\n          case name\n            of \"R\": result = newPatt(aux n[2], ckRef, n[1].strVal)\n        if result.len == 0:\n          krak n, \"Unknown template or capture '\" & name & \"'\"\n\n      of nnkPrefix:\n        # Nim combines all prefix chars into one string. 
Handle prefixes\n        # chars right to left\n        var p = aux n[1]\n        for c in n[0].strVal.reversed:\n          case c:\n            of '?': p = ?p\n            of '+': p = +p\n            of '*': p = *p\n            of '!': p = !p\n            of '&': p = &p\n            of '>': p = >p\n            of '@': p = @p\n            else: krak n, \"Unhandled prefix operator\"\n        result = p\n\n      of nnkInfix:\n        case n[0].strVal:\n          of \"*\", \"∙\": result = aux(n[1]) * aux(n[2])\n          of \"-\": result = aux(n[1]) - aux(n[2])\n          of \"^\": result = newPattAssoc(aux(n[1]), intVal(n[2]), assocLeft)\n          of \"^^\": result = newPattAssoc(aux(n[1]), intVal(n[2]), assocRight)\n          else: krak n, \"Unhandled infix operator\"\n\n      of nnkBracketExpr:\n        let p = aux(n[0])\n        if n[1].kind == nnkIntLit:\n          result = p{n[1].intVal}\n        elif n[1].kind == nnkInfix and n[1][0].eqIdent(\"..\"):\n          result = p{n[1][1].intVal..n[1][2].intVal}\n        else: krak n, \"syntax error\"\n\n      of nnkIdent:\n        result = inlineOrCall(n.strVal)\n\n      of nnkDotExpr:\n        result = inlineOrCall(n.repr)\n\n      of nnkCurly:\n        var cs: CharSet\n        for nc in n:\n          if nc.kind == nnkCharLit:\n            cs.incl nc.intVal.char\n          elif nc.kind == nnkInfix:\n            if nc[0].kind == nnkIdent and nc[0].eqIdent(\"..\"):\n              for c in nc[1].intVal..nc[2].intVal:\n                cs.incl c.char\n            else:\n              krak n, \"syntax error\"\n          else:\n            krak n, \"syntax error\"\n        if cs.card == 0:\n          result = newPatt(1)\n        else:\n          result = newPatt(cs)\n\n      of nnkCallStrLit:\n        case n[0].strVal:\n          of \"i\": \n            for c in n[1].strVal:\n              result.add newPatt({c.toLowerAscii, c.toUpperAscii})\n          of \"E\": result = newErrorPatt(n[1].strVal)\n          else: krak n, 
\"unhandled string prefix\"\n\n      of nnkBracket:\n        result.add newLitPatt n[0]\n\n      else:\n        echo n.astGenRepr\n        krak n, \"syntax error\"\n\n    for i in result.mitems:\n      if i.nimNode == nil:\n        i.nimNode = n\n\n  result = aux(nn.flattenChoice())\n  dot.addPatt(pattName, result.len)\n\n\n#\n# Parse a grammar. A grammar consists of named rules, where each rule is one\n# pattern\n#\n\nproc parseGrammar*(ns: NimNode, dot: Dot=nil, dumpRailroad = true): Grammar =\n  result = new Grammar\n\n  for n in ns:\n\n    if n.kind == nnkInfix and n[0].eqIdent(\"<-\"):\n\n      case n[1].kind\n      of nnkIdent, nnkDotExpr, nnkPrefix:\n        let name = if n[1].kind == nnkPrefix:\n                     when declared(expectIdent):\n                       expectIdent n[1][0], \">\"\n                     n[1][1].repr\n                   else: n[1].repr\n        var patt = parsePatt(name, n[2], result, dot)\n        if n.len == 4:\n          patt = newPatt(patt, ckCodeBlock)\n          patt[patt.high].capAction = n[3]\n        result.addRule(name, if n[1].kind == nnkPrefix: >patt else: patt, n.repr, n.lineInfoObj)\n\n        when npegGraph:\n          if dumpRailroad:\n            echo parseRailroad(n[2], result).wrap(name)\n\n      of nnkCall:\n        if n.len > 3:\n          error \"Code blocks can not be used on templates\", n[3]\n        var t = Template(name: n[1][0].strVal, code: n[2])\n        for i in 1..<n[1].len:\n          t.args.add n[1][i].strVal\n        result.templates[t.name] = t\n\n      else:\n        error \"Expected PEG rule name but got \" & $n[1].kind, n\n\n    else:\n      error \"Expected PEG rule (name <- ...)\", n\n\n"
  },
  {
    "path": "src/npeg/patt.nim",
    "content": "\nimport macros\nimport sequtils\n\nimport npeg/[common,stack]\n\n\n# Some tests on patterns\n\nproc isSet(p: Patt): bool {.used.} =\n  p.len == 1 and p[0].op == opSet\n\n\nproc toSet(p: Patt, cs: var CharSet): bool =\n  when npegOptSets:\n    if p.len == 1:\n      let i = p[0]\n      if i.op == opSet:\n        cs = i.cs\n        return true\n      if i.op == opChr:\n        cs = { i.ch }\n        return true\n      if i.op == opAny:\n        cs = {low(char)..high(char)}\n        return true\n\n\nproc checkSanity(p: Patt) =\n  if p.len >= npegPattMaxLen:\n    krak \"NPeg: grammar too complex, (\" & $p.len & \" > \" & $npegPattMaxLen & \").\\n\" &\n         \"If you think this is a mistake, increase the maximum size with -d:npegPattMaxLen=N\"\n\n\n# Checks if the passed patt matches an empty subject. This is done by executing\n# the pattern as if it was passed an empty subject and see how it terminates.\n\nproc matchesEmpty(patt: Patt): bool =\n  var backStack = initStack[int](\"backtrack\", 8, 32)\n  var ip: int\n  while ip < patt.len:\n    let i = patt[ip]\n    case i.op\n      of opChoice:\n        push(backStack, ip+i.ipOffset)\n        inc ip\n      of opCommit:\n        discard pop(backStack)\n        ip += i.ipOffset\n      of opJump: ip += i.callOffset\n      of opCapOpen, opCapClose, opNop, opSpan, opPrecPush, opPrecPop: inc ip\n      of opErr, opReturn, opCall: return false\n      of opAny, opChr, opLit, opSet, opBackref, opFail:\n        if i.failOffset != 0:\n          ip += i.failOffset\n        elif backStack.top > 0:\n          ip = pop(backStack)\n        else:\n          return false\n  return true\n\n\n# Calculate how far captures or choices can be shifted into this pattern\n# without consequences; this allows the pattern to fail before pushing to the\n# backStack or capStack\n\nproc canShift(p: Patt, enable: static[bool]): (int, int) =\n  let i = p[0]\n  if i.failOffset == 0:\n    case i.op\n    of opChr, opAny, opSet:\n      
result = (1, 1)\n    else:\n      discard\n\n### Atoms\n\nproc newPatt*(s: string): Patt =\n  for ch in s:\n    result.add Inst(op: opChr, ch: ch)\n\nproc newLitPatt*(n: NimNode): Patt =\n  result.add Inst(op: opLit, lit: n)\n\nproc newPatt*(p: Patt, ck: CapKind, name = \"\"): Patt =\n  let (siShift, ipShift) = p.canShift(npegOptCapShift)\n  result.add p[0..<ipShift]\n  result.add Inst(op: opCapOpen, capKind: ck, capSiOffset: -siShift, capName: name)\n  result.add p[ipShift..^1]\n  result.add Inst(op: opCapClose, capKind: ck)\n\nproc newCallPatt*(label: string): Patt =\n  result.add Inst(op: opCall, callLabel: label)\n\nproc newPatt*(n: BiggestInt): Patt =\n  if n > 0:\n    for i in 1..n:\n      result.add Inst(op: opAny)\n  else:\n    result.add Inst(op: opNop)\n\nproc newPatt*(cs: CharSet): Patt =\n  result.add Inst(op: opSet, cs: cs)\n\nproc newBackrefPatt*(refName: string): Patt =\n  result.add Inst(op: opBackref, refName: refName)\n\nproc newReturnPatt*(): Patt =\n  result.add Inst(op: opReturn)\n\nproc newErrorPatt*(msg: string): Patt =\n  result.add Inst(op: opErr, msg: msg)\n\n\n# Add a choice/commit pair around pattern P, try to optimize head\n# fails when possible\n\nproc addChoiceCommit(addTo: var Patt, p: Patt, choiceOffset, commitOffset: int) =\n  let (siShift, ipShift) = p.canShift(npegOptHeadFail)\n  for n in 0..<ipShift:\n    addTo.add p[n]\n    addTo[addTo.high].failOffset = choiceOffset - n\n  addTo.add Inst(op: opChoice, ipOffset: choiceOffset - ipShift, siOffset: -siShift)\n  addTo.add p[ipShift..^1]\n  addTo.add Inst(op: opCommit, ipOffset: commitOffset)\n\n\n### Prefixes\n\nproc `?`*(p: Patt): Patt =\n  result.addChoiceCommit(p, p.len+2, 1)\n\nproc `*`*(p: Patt): Patt =\n  var cs: CharSet\n  if p.toSet(cs):\n    result.add Inst(op: opSpan, cs: cs)\n  else:\n    if matchesEmpty(p):\n      krak \"'*' repeat argument matches empty subject\"\n    result.addChoiceCommit(p, p.len+2, -p.len-1)\n\nproc `+`*(p: Patt): Patt =\n  result.add p\n  
result.add *p\n\nproc `>`*(p: Patt): Patt =\n  return newPatt(p, ckVal)\n\nproc `!`*(p: Patt): Patt =\n  result.addChoiceCommit(p, p.len+3, 1)\n  result.add Inst(op: opFail)\n\nproc `&`*(p: Patt): Patt =\n  result.add !(!p)\n\nproc `@`*(p: Patt): Patt =\n  result.addChoiceCommit(p, p.len+2, 3)\n  result.add Inst(op: opAny)\n  result.add Inst(op: opJump, callOffset: - p.len - 3)\n\n### Infixes\n\nproc `*`*(p1, p2: Patt): Patt =\n  result.add p1\n  result.add p2\n  result.checkSanity\n\n\n# choice() is generated from | operators by flattenChoice().\n#\n# Optimizations done here:\n# - convert to union if all elements can be represented as a set\n# - head fails: when possible, opChoice is shifted into a pattern to\n#   allow the pattern to fail before emitting the opChoice\n\nproc choice*(ps: openArray[Patt]): Patt =\n  var csUnion: CharSet\n  var allSets = true\n  for p in ps:\n    var cs: CharSet\n    if p.toSet(cs):\n      csUnion = csUnion + cs\n    else:\n      allSets = false\n  if allSets:\n    result.add Inst(op: opSet, cs: csUnion)\n    return result\n\n  var lenTot, ip: int\n  lenTot = foldl(ps, a + b.len+2, 0)\n  for i, p in ps:\n    if i < ps.high:\n      result.addChoiceCommit(p, p.len+2, lenTot-ip-p.len-3)\n      ip += p.len + 2\n    else:\n      result.add p\n\nproc `-`*(p1, p2: Patt): Patt =\n  var cs1, cs2: CharSet\n  if p1.toSet(cs1) and p2.toSet(cs2):\n    result.add Inst(op: opSet, cs: cs1 - cs2)\n  else:\n    result.add !p2\n    result.add p1\n\nproc newPattAssoc*(p: Patt, prec: BiggestInt, assoc: Assoc): Patt =\n  result.add Inst(op: opPrecPush, prec: prec.int, assoc: assoc)\n  result.add p\n  result.add Inst(op: opPrecPop)\n\n\n### Others\n\nproc `{}`*(p: Patt, n: BiggestInt): Patt =\n  for i in 1..n:\n    result.add p\n\nproc `{}`*(p: Patt, range: HSlice[system.BiggestInt, system.BiggestInt]): Patt =\n  result.add p{range.a}\n  for i in range.a..<range.b:\n    result.add ?p\n\n\n"
  },
  {
    "path": "src/npeg/railroad.nim",
    "content": "\nimport macros, unicode, tables, strutils, sequtils\nimport npeg/[grammar,common]\n\nwhen not defined(js):\n  import terminal\nelse:\n  type ForeGroundColor = enum\n    fgYellow, fgMagenta, fgGreen, fgWhite, fgCyan, fgRed\n\nconst\n  fgName = fgYellow\n  fgLit = fgMagenta\n  fgLine = fgGreen\n  fgCap = fgWhite\n  fgNonterm = fgCyan\n  fgError = fgRed\n\ntype\n\n  Sym = object\n    x, y: int\n    c: Char\n\n  Char = object\n    r: Rune\n    fg: ForeGroundColor\n\n  Line = seq[Char]\n\n  Grid = seq[Line]\n\n  Node = ref object\n    w, y0, y1: int\n    syms: seq[Sym]\n    kids: seq[Kid]\n\n  Kid = object\n    dx, dy: int\n    n: Node\n\n# Provide ASCII alternative of box drawing for windows\n\nwhen defined(windows) or defined(js):\n  const asciiTable = [ (\"│\", \"|\"), (\"─\", \"-\"), (\"╭\", \".\"), (\"╮\", \".\"),\n                       (\"╰\", \"`\"), (\"╯\", \"'\"), (\"┬\", \"-\"), (\"├\", \"|\"),\n                       (\"┤\", \"|\"), (\"┴\", \"-\"), (\"━\", \"=\") ]\n\n#\n# Renders a node to text output\n#\n\nproc `$`*(n: Node): string =\n  let h = n.y1 - n.y0 + 1\n  let y0 = n.y0\n  var line: Line\n  var grid: Grid\n  for x in 0..<n.w:\n    line.add Char(r: ' '.Rune)\n  for y in 0..<h: grid.add line\n\n  proc render(n: Node, x, y: int) =\n    for k in n.kids:\n      render(k.n, x + k.dx, y + k.dy)\n    for s in n.syms:\n      let sx = x+s.x\n      let sy = y+s.y - y0\n      grid[sy][sx] = s.c\n  render(n, 0, 0)\n      \n  when defined(windows) or defined(js):\n    for line in grid:\n      for cell in line:\n        result.add ($cell.r).multiReplace(asciiTable)\n      result.add \"\\r\\n\"\n  else:\n    var fg = fgLine\n    for line in grid:\n      for cell in line:\n        if fg != cell.fg:\n          fg = cell.fg\n          result.add ansiForegroundColorCode(fg)\n        result.add $cell.r\n      result.add \"\\n\"\n    result.add ansiForegroundColorCode(fgLine)\n\nproc poke(n: Node, fg: ForeGroundColor, cs: varArgs[tuple[x, y: int, s: 
string]]) =\n  for c in cs:\n    n.syms.add Sym(x: c.x, y: c.y, c: Char(r: c.s.runeAt(0), fg: fg))\n\nproc pad(n: Node, left, right, top, bottom = 0): Node = \n  result = Node(w: n.w + left + right, y0: n.y0 - top, y1: n.y1 + bottom)\n  result.kids.add Kid(n: n, dx: left, dy: 0)\n  for x in 0..<left:\n    result.poke fgLine, (x, 0, \"─\")\n  for x in n.w+left..<result.w:\n    result.poke fgLine, (x, 0, \"─\")\n\nproc wrap*(n: Node, name: string): Node =\n  let namer = (name & \" \").toRunes()\n  let nl = namer.len()\n  result = n.pad(nl+2, 2)\n  result.poke fgLine, (nl+0, 0, \"o\"), (nl+1, 0, \"─\"), (result.w-2, 0, \"─\"), (result.w-1, 0, \"o\")\n  for i in 0..<nl:\n    result.poke fgName, (i, 0, $namer[i])\n\nproc newNode(s: string, fg = fgLine): Node =\n  let rs = s.dumpSubject().toRunes()\n  let n = Node(w: rs.len)\n  for x in 0..<rs.len:\n    n.poke fg, (x, 0, $rs[x])\n  result = n.pad(1, 1)\n\nproc newCapNode(n: Node, name = \"\"): Node =\n  result = pad(n, 2, 2)\n  result.y0 = n.y0 - 1\n  result.y1 = n.y1 + 1\n  let (x0, x1, y0, y1) = (1, result.w-2, result.y0, result.y1)\n  result.poke fgCap, (x0, y0, \"╭\"), (x1, y0, \"╮\"), (x0, y1, \"╰\"), (x1, y1, \"╯\")\n  for x in x0+1..x1-1:\n    result.poke fgCap, (x, y0, \"╶\"), (x, y1, \"╶\")\n  for y in y0+1..y1-1:\n    if y != 0:\n      result.poke fgCap, (x0, y, \"┆\"), (x1, y, \"┆\")\n  let namer = name.toRunes()\n  for i in 0..<namer.len:\n    result.poke fgCap, ((x1+x0-namer.len)/%2+i, y0, $namer[i])\n\nproc newPrecNode(n: Node, prec: BiggestInt, lr: string): Node =\n  let l = lr & $prec & lr\n  result = pad(n, if l.len > n.w: l.len-n.w else: 0, 0, 1)\n  for i, c in l:\n    result.poke fgCap, (result.w/%2 - l.len/%2 + i, -1, $c)\n\nproc `*`(n1, n2: Node): Node =\n  result = Node(w: n1.w + n2.w + 1, y0: min(n1.y0, n2.y0), y1: max(n1.y1, n2.y1))\n  result.poke fgGreen, (n1.w, 0, \"»\")\n  result.kids.add Kid(n: n1, dx: 0)\n  result.kids.add Kid(n: n2, dx: n1.w+1)\n\nproc `?`(n: Node): Node =\n  result = 
n.pad(1, 1, 1, 0)\n  let (x1, x2, y1, y2) = (0, n.w+1, -1 + n.y0, 0)\n  result.poke fgLine, (x1, y1, \"╭\"), (x1, y2, \"┴\"), (x2, y1, \"╮\"), (x2, y2, \"┴\")\n  for x in x1+1..x2-1:\n    result.poke fgLine, (x, y1, \"─\")\n  for y in y1+1..y2-1:\n    result.poke fgLine, (x1, y, \"│\"), (x2, y, \"│\")\n  result.poke fgLine, ((x1+x2)/%2, y1, \"»\")\n\nproc `+`(n: Node): Node =\n  result = n.pad(1, 1, 0, 1)\n  let (x1, x2, y1, y2) = (0, n.w+1, 0, n.y1+1)\n  result.poke fgLine, (x1, y1, \"┬\"), (x1, y2, \"╰\"), (x2, y1, \"┬\"), (x2, y2, \"╯\")\n  for x in x1+1..x2-1:\n    result.poke fgLine, (x, y2, \"─\")\n  for y in y1+1..y2-1:\n    result.poke fgLine, (x1, y, \"│\"), (x2, y, \"│\")\n  result.poke fgLine, ((x1+x2)/%2, y2, \"«\")\n\nproc `!`(n: Node): Node =\n  result = n.pad(0, 0, 1)\n  let (x0, x1) = (1, result.w-2)\n  for x in x0..x1:\n    result.poke fgRed, (x, result.y0, \"━\")\n\nproc `-`*(p1, p2: Node): Node =\n  return !p2 * p1\n\nproc `*`(n: Node): Node = ? + n\n\nproc `@`(n: Node): Node =\n  result = *(!n * newNode(\"1\")) * n\n\nproc `&`(n: Node): Node =\n  result = ! ! 
n\n\nproc choice(ns: varArgs[Node]): Node =\n  var wmax = 0\n  for n in ns:\n    wmax = max(wmax, n.w)\n  var dys = @[0]\n  var dy = 0\n  for i in 0..<ns.len-1:\n    inc dy, ns[i].y1 - ns[i+1].y0 + 1\n    dys.add dy\n  result = Node(w: wmax+4, y0: ns[0].y0, y1: dy+ns[ns.high].y1)\n  let x0 = 1\n  let x1 = wmax+2\n  result.poke fgLine, (0, 0, \"─\"), (result.w-1, 0, \"─\")\n  for i in 0..<ns.len:\n    let n = ns[i]\n    result.kids.add Kid(n: n.pad(0, wmax-n.w), dx: 2, dy: dys[i])\n  for y in 1..<dys[dys.high]:\n    result.poke fgLine, (x0, y, \"│\"), (x1, y, \"│\")\n  result.poke fgLine, (x0, 0, \"┬\"), (x1, 0, \"┬\")\n  for i in 0..<ns.len-1:\n    if i > 0:\n      result.poke fgLine, (x0, dys[i], \"├\"), (x1, dys[i], \"┤\")\n  result.poke fgLine, (x0, dys[dys.high], \"╰\"), (x1, dys[dys.high], \"╯\")\n\nproc `{}`*(p: Node, n: BiggestInt): Node =\n  result = p\n  for i in 1..<n:\n    result = result * p\n\nproc `{}`*(p: Node, range: HSlice[system.BiggestInt, system.BiggestInt]): Node =\n  result = p{range.a}\n  for i in range.a..<range.b:\n    result = result * ?p\n\n# This is a simplified parser based on parsePatt(), but lacking any error\n# checking. 
This will always run after parsePatt(), so any errors would already\n# have been caught there\n\nproc parseRailRoad*(nn: NimNode, grammar: Grammar): Node =\n\n  proc aux(n: NimNode): Node =\n\n    proc applyTemplate(name: string, arg: NimNode): NimNode =\n      let t = if name in grammar.templates:\n        grammar.templates[name]\n      else:\n        libImportTemplate(name)\n      if t != nil:\n        proc aux(n: NimNode): NimNode =\n          if n.kind == nnkIdent and n.strVal in t.args:\n            result = arg[ find(t.args, n.strVal)+1 ]\n          else:\n            result = copyNimNode(n)\n            for nc in n:\n              result.add aux(nc)\n        result = aux(t.code).flattenChoice()\n\n    case n.kind:\n\n      of nnKPar:\n        result = aux n[0]\n\n      of nnkIntLit:\n        result = newNode($n.intVal, fgLit)\n\n      of nnkStrLit:\n        result = newNode(\"\\\"\" & $n.strval.dumpSubject() & \"\\\"\", fgLit)\n\n      of nnkCharLit:\n        result = newNode(\"'\" & $n.intVal.char & \"'\", fgLit)\n\n      of nnkCall:\n        var name: string\n        if n[0].kind == nnkIdent:\n          name = n[0].strVal\n        elif n[0].kind == nnkDotExpr:\n          name = n[0].repr\n        let n2 = applyTemplate(name, n)\n        if n2 != nil:\n          result = aux n2\n        elif name == \"choice\":\n          result = choice(n[1..^1].map(aux))\n        elif n.len == 2:\n          result = newCapNode aux(n[1])\n        elif n.len == 3:\n          result = newCapNode(aux(n[2]), n[1].strVal)\n\n      of nnkPrefix:\n        # Nim combines all prefix chars into one string. 
Handle prefixes\n        # chars right to left\n        let cs = n[0].strVal\n        var p = aux n[1]\n        for i in 1..cs.len:\n          case cs[cs.len-i]:\n            of '?': p = ?p\n            of '+': p = +p\n            of '*': p = *p\n            of '!': p = !p\n            of '@': p = @p\n            of '&': p = &p\n            of '>': p = newCapNode(p)\n            else: p = p\n        result = p\n\n      of nnkInfix:\n        case n[0].strVal:\n          of \"*\", \"∙\": result = aux(n[1]) * aux(n[2])\n          of \"-\": result = aux(n[1]) - aux(n[2])\n          of \"^\": result = newPrecNode(aux(n[1]), intVal(n[2]), \"<\")\n          of \"^^\": result = newPrecNode(aux(n[1]), intVal(n[2]), \">\")\n          else: discard\n\n      of nnkBracketExpr:\n        let p = aux(n[0])\n        if n[1].kind == nnkIntLit:\n          result = p{n[1].intVal}\n        elif n[1].kind == nnkInfix and n[1][0].eqIdent(\"..\"):\n          result = p{n[1][1].intVal..n[1][2].intVal}\n        else: discard\n\n      of nnkIdent:\n        result = newNode(\"[\" & n.strVal & \"]\", fgNonterm)\n\n      of nnkDotExpr:\n        result = newNode(\"[\" & n.repr & \"]\", fgNonterm)\n\n      of nnkCurly:\n        var cs: CharSet\n        for nc in n:\n          if nc.kind == nnkCharLit:\n            cs.incl nc.intVal.char\n          elif nc.kind == nnkInfix:\n            if nc[0].kind == nnkIdent and nc[0].eqIdent(\"..\"):\n              for c in nc[1].intVal..nc[2].intVal:\n                cs.incl c.char\n        if cs.card == 0:\n          result = newNode(\"1\", fgNonterm)\n        else:\n          result = newNode(dumpSet(cs), fgLit)\n\n      of nnkCallStrLit:\n        case n[0].strVal:\n          of \"i\": result = newNode(n[1].strval)\n          of \"E\": result = newNode(\"ERROR\", fgError)\n\n      of nnkBracket:\n        result = newNode(\"[\" & n[0].repr & \"]\", fgNonterm)\n\n      else:\n        discard\n\n  let nnf = nn.flattenChoice\n  result = aux(nnf)\n\n\n"
  },
  {
    "path": "src/npeg/stack.nim",
    "content": "\n# This module implements a basic stack[T]. This is used instead of seq[T]\n# because the latter has bad performance when unwinding more then one frame at\n# a time (ie, setlen). These stacks keep track of their own top and do not\n# shrink the underlying seq when popping or unwinding.\n\ntype\n  Stack*[T] = object\n    name: string\n    top*: int\n    max: int\n    frames: seq[T]\n\n\nproc `$`*[T](s: Stack[T]): string =\n  for i in 0..<s.top:\n    result.add $i & \": \" & $s.frames[i] & \"\\n\"\n\nproc initStack*[T](name: string, len: int, max: int=int.high): Stack[T] =\n  result.name = name\n  result.frames.setLen len\n  result.max = max\n\nproc grow*[T](s: var Stack[T]) =\n  if s.top >= s.max:\n    mixin NPegStackOverflowError\n    raise newException(NPegStackOverflowError, s.name & \" stack overflow, depth>\" & $s.max)\n  s.frames.setLen s.frames.len * 2\n\ntemplate push*[T](s: var Stack[T], frame: T) =\n  if s.top >= s.frames.len: grow(s)\n  s.frames[s.top] = frame\n  inc s.top\n\ntemplate pop*[T](s: var Stack[T]): T =\n  assert s.top > 0\n  dec s.top\n  s.frames[s.top]\n\ntemplate peek*[T](s: Stack[T]): T =\n  assert s.top > 0\n  s.frames[s.top-1]\n\ntemplate `[]`*[T](s: Stack[T], idx: int): T =\n  assert idx < s.top\n  s.frames[idx]\n\ntemplate update*[T](s: Stack[T], field: untyped, val: untyped) =\n  assert s.top > 0\n  s.frames[s.top-1].field = val\n\n"
  },
  {
    "path": "src/npeg.nim",
    "content": "\n#\n# Copyright (c) 2019 Ico Doornekamp\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# This parser implementation is based on the following papers:\n#\n# - A Text Pattern-Matching Tool based on Parsing Expression Grammars\n#   (Roberto Ierusalimschy)\n#\n# - An efficient parsing machine for PEGs\n#   (Jos Craaijo)\n#\n\n## Note: This document is rather terse, for the complete NPeg manual please refer\n## to the README.md or the git project page at https://github.com/zevv/npeg\n##   \n## NPeg is a pure Nim pattern matching library. It provides macros to compile\n## patterns and grammars (PEGs) to Nim procedures which will parse a string and\n## collect selected parts of the input. 
PEGs are not unlike regular\n## expressions, but offer more power and flexibility, and have less ambiguities.\n##\n## Here is a simple example showing the power of NPeg: The macro `peg` compiles a\n## grammar definition into a `parser` object, which is used to match a string and\n## place the key-value pairs into the Nim table `words`:\n\nrunnableExamples:\n\n  import npeg, strutils, tables\n\n  var words: Table[string, int]\n\n  let parser = peg \"pairs\":\n    pairs <- pair * *(',' * pair) * !1\n    word <- +Alpha\n    number <- +Digit\n    pair <- >word * '=' * >number:\n      words[$1] = parseInt($2)\n\n  doAssert parser.match(\"one=1,two=2,three=3,four=4\").ok\n\n\nimport tables\nimport macros\nimport strutils\nimport npeg/[common,codegen,capture,parsepatt,grammar,dot]\n\nexport NPegException,\n       NPegStackOverflowError,\n       NPegUnknownBackrefError,\n       NPegCaptureOutOfRangeError,\n       NpegParseError,\n       contains, `[]`, len\n\n# Create a parser for a PEG grammar\n\nproc pegAux(name: string, subjectType, userDataType, userDataId, n: NimNode): NimNode =\n  var dot = newDot(name)\n  var grammar = parseGrammar(n, dot)\n  var program = grammar.link(name, dot)\n  let code = program.genCode(subjectType, userDataType, userDataId)\n  dot.dump()\n  code\n\nmacro peg*(name: untyped, n: untyped): untyped =\n  ## Construct a parser from the given PEG grammar. `name` is the initial\n  ## grammar rule where parsing starts. This macro returns a `Parser` type\n  ## which can later be used for matching subjects with the `match()` proc\n  pegAux name.strVal, ident \"char\", ident \"bool\", ident \"userdata\", n\n\nmacro peg*(name: untyped, userData: untyped, n: untyped): untyped =\n  ## Construct a parser from the given PEG grammar. `name` is the initial\n  ## grammar rule where parsing starts. 
This macro returns a `Parser` type\n  ## which can later be used for matching subjects with the `match()` proc\n  ##\n  ## The `userdata` argument is a colon expression with an identifier and a\n  ## type, this identifier is available in code block captions during parsing.\n  expectKind(userData, nnkExprColonExpr)\n  pegAux name.strVal, ident \"char\", userData[1], userData[0], n\n\nmacro peg*(name: untyped, subjectType, userData, n: untyped): untyped =\n  ## Construct a parser from the given PEG grammar. `name` is the initial\n  ## grammar rule where parsing starts. This macro returns a `Parser` type\n  ## which can later be used for matching subjects with the `match()` proc\n  ##\n  ## The `subjectType` argument is a Nim type which should match the base\n  ## type of the subject passed to `match()`.\n  ##\n  ## The `userdata` argument is a colon expression with an identifier and a\n  ## type, this identifier is available in code block captions during parsing.\n  expectKind(userData, nnkExprColonExpr)\n  pegAux name.strVal, subjectType, userData[1], userData[0], n\n\ntemplate patt*(n: untyped): untyped =\n  ## Construct a parser from a single PEG rule. This is similar to the regular\n  ## `peg()` macro, but useful for short regexp-like parsers that do not need a\n  ## complete grammar.\n  peg anonymous:\n    anonymous <- n\n\ntemplate patt*(n: untyped, code: untyped): untyped =\n  ## Construct a parser from a single PEG rule. This is similar to the regular\n  ## `peg()` macro, but useful for short regexp-like parsers that do not need a\n  ## complete grammar. 
This variant takes a code block which will be used as\n  ## code block capture for the anonymous rule.\n  peg anonymous:\n    anonymous <- n:\n      code\n\nmacro grammar*(libNameNode: untyped, n: untyped) =\n  ## This macro defines a collection of rules to be stored in NPeg's global\n  ## grammar library.\n  let libName = libNameNode.strVal\n  let grammar = parseGrammar(n, dumpRailroad = libName != \"\")\n  libStore(libName, grammar)\n\n\nproc match*[S, T](p: Parser, s: openArray[S], userData: var T): MatchResult[S] =\n  ## Match a subject string with the given generic parser. The returned\n  ## `MatchResult` contains the result of the match and can be used to query\n  ## any captures.\n  var ms = p.fn_init()\n  p.fn_run(ms, s, userData)\n\n\nproc match*[S](p: Parser, s: openArray[S]): MatchResult[S] =\n  ## Match a subject string with the given parser. The returned `MatchResult`\n  ## contains the result of the match and can be used to query any captures.\n  var userData: bool # dummy if user does not provide a type\n  p.match(s, userData)\n\n\n# Match a file\n\nwhen defined(windows) or defined(posix):\n  import memfiles, os\n  proc matchFile*[T](p: Parser, fname: string, userData: var T): MatchResult[char] =\n    # memfiles.open() throws on empty files, work around that\n    if os.getFileSize(fname) > 0:\n      var m = memfiles.open(fname)\n      var a: ptr UncheckedArray[char] = cast[ptr UncheckedArray[char]](m.mem)\n      var ms = p.fn_init()\n      result = p.fn_run(ms, toOpenArray(a, 0, m.size-1), userData)\n      m.close()\n    else:\n      result = match(p, \"\", userData)\n  \n  proc matchFile*(p: Parser, fname: string): MatchResult[char] =\n    var userData: bool # dummy if user does not provide a type\n    matchFile(p, fname, userData)\n\n\nproc captures*(mr: MatchResult[char]): seq[string] =\n  ## Return all plain string captures from the match result\n  for cap in collectCaptures(mr.cs):\n    result.add cap.s\n\nproc captures*[S](mr: MatchResult[S]): 
seq[S] =\n  ## Return all plain string captures from the match result\n  for cap in collectCaptures(mr.cs):\n    result.add cap.s\n\ntemplate nimBug22740*() =\n  ## Provide stub templates as a workaround for https://github.com/nim-lang/Nim/issues/22740.\n  ## Invoke this template in your code if you want to define a parser in a generic proc.\n  template `>`(a: untyped): untyped = discard\n  template `*`(a: untyped): untyped = discard\n  template `-`(a: untyped): untyped = discard\n  template `+`(a: untyped): untyped = discard\n  template `?`(a: untyped): untyped = discard\n  template `!`(a: untyped): untyped = discard\n  template `$`(a: untyped): untyped = discard\n\n\nimport npeg/lib/core\n\n"
  },
  {
    "path": "tests/basics.nim",
    "content": "import unittest\nimport strutils\nimport npeg\n  \n{.push warning[Spacing]: off.}\n\n\nsuite \"unit tests\":\n\n  test \"atoms\":\n    doAssert     patt(0 * \"a\").match(\"a\").ok\n    doAssert     patt(1).match(\"a\").ok\n    doAssert     patt(1).match(\"a\").ok\n    doAssert     patt(2).match(\"a\").ok == false\n    doAssert     patt(\"a\").match(\"a\").ok\n    doAssert     patt(\"a\").match(\"b\").ok == false\n    doAssert     patt(\"abc\").match(\"abc\").ok\n    doAssert     patt({'a'}).match(\"a\").ok\n    doAssert     patt({'a'}).match(\"b\").ok == false\n    doAssert     patt({'a','b'}).match(\"a\").ok\n    doAssert     patt({'a','b'}).match(\"b\").ok\n    doAssert     patt({'a','b'}).match(\"c\").ok == false\n    doAssert     patt({'a'..'c'}).match(\"a\").ok\n    doAssert     patt({'a'..'c'}).match(\"b\").ok\n    doAssert     patt({'a'..'c'}).match(\"c\").ok\n    doAssert     patt({'a'..'c'}).match(\"d\").ok == false\n    doAssert     patt({'a'..'c'}).match(\"a\").ok\n    doAssert     patt(\"\").match(\"abcde\").matchLen == 0\n    doAssert     patt(\"a\").match(\"abcde\").matchLen == 1\n    doAssert     patt(\"ab\").match(\"abcde\").matchLen == 2\n    doAssert     patt(i\"ab\").match(\"AB\").ok\n\n  test \"*: concatenation\":\n    doAssert     patt(\"a\" * \"b\").match(\"ab\").ok\n    #doAssert     patt(\"a\" ∙ \"b\").match(\"ab\").ok\n\n  test \"?: zero or one\":\n    doAssert     patt(\"a\" * ?\"b\" * \"c\").match(\"abc\").ok\n    doAssert     patt(\"a\" * ?\"b\" * \"c\").match(\"ac\").ok\n\n  test \"+: one or more\":\n    doAssert     patt(\"a\" * +\"b\" * \"c\").match(\"abc\").ok\n    doAssert     patt(\"a\" * +\"b\" * \"c\").match(\"abbc\").ok\n    doAssert     patt(\"a\" * +\"b\" * \"c\").match(\"ac\").ok == false\n\n  test \"*: zero or more\":\n    doAssert     patt(*'a').match(\"aaaa\").ok\n    doAssert     patt(*'a' * 'b').match(\"aaaab\").ok\n    doAssert     patt(*'a' * 'b').match(\"bbbbb\").ok\n    doAssert     patt(*'a' * 
'b').match(\"caaab\").ok == false\n    doAssert     patt(+'a' * 'b').match(\"aaaab\").ok\n    doAssert     patt(+'a' * 'b').match(\"ab\").ok\n    doAssert     patt(+'a' * 'b').match(\"b\").ok == false\n\n  test \"!: not predicate\":\n    doAssert     patt('a' * !'b').match(\"ac\").ok\n    doAssert     patt('a' * !'b').match(\"ab\").ok == false\n\n  test \"&: and predicate\":\n    doAssert     patt(&\"abc\").match(\"abc\").ok\n    doAssert     patt(&\"abc\").match(\"abd\").ok == false\n    doAssert     patt(&\"abc\").match(\"abc\").matchLen == 0\n\n  test \"@: search\":\n    doAssert     patt(@\"fg\").match(\"abcdefghijk\").matchLen == 7\n\n  test \"[n]: count\":\n    doAssert     patt(1[3]).match(\"aaaa\").ok\n    doAssert     patt(1[4]).match(\"aaaa\").ok\n    doAssert     patt(1[5]).match(\"aaaa\").ok == false\n\n  test \"[m..n]: count\":\n    doAssert     patt('a'[2..4] * !1).match(\"\").ok == false\n    doAssert     patt('a'[2..4] * !1).match(\"a\").ok == false\n    doAssert     patt('a'[2..4] * !1).match(\"aa\").ok\n    doAssert     patt('a'[2..4] * !1).match(\"aaa\").ok\n    doAssert     patt('a'[2..4] * !1).match(\"aaaa\").ok\n    doAssert     patt('a'[2..4] * !1).match(\"aaaaa\").ok == false\n\n    doAssert     patt('a'[0..1] * !1).match(\"\").ok\n    doAssert     patt('a'[0..1] * !1).match(\"a\").ok\n    doAssert     patt('a'[0..1] * !1).match(\"aa\").ok == false\n\n  test \"|: ordered choice\":\n    doAssert     patt(\"ab\" | \"cd\").match(\"ab\").ok\n    doAssert     patt(\"ab\" | \"cd\").match(\"cd\").ok\n    doAssert     patt(\"ab\" | \"cd\").match(\"ef\").ok == false\n    doAssert     patt((\"ab\" | \"cd\") | \"ef\").match(\"ab\").ok == true\n    doAssert     patt((\"ab\" | \"cd\") | \"ef\").match(\"cd\").ok == true\n    doAssert     patt((\"ab\" | \"cd\") | \"ef\").match(\"ef\").ok == true\n    doAssert     patt(\"ab\" | (\"cd\") | \"ef\").match(\"ab\").ok == true\n    doAssert     patt(\"ab\" | (\"cd\") | \"ef\").match(\"cd\").ok == true\n    
doAssert     patt(\"ab\" | (\"cd\") | \"ef\").match(\"ef\").ok == true\n\n  test \"-: difference\":\n    doAssert     patt(\"abcd\" - \"abcdef\").match(\"abcdefgh\").ok == false\n    doAssert     patt(\"abcd\" - \"abcdf\").match(\"abcdefgh\").ok\n\n  test \"Builtins\":\n    doAssert     patt(Digit).match(\"1\").ok\n    doAssert     patt(Digit).match(\"a\").ok == false\n    doAssert     patt(Upper).match(\"A\").ok\n    doAssert     patt(Upper).match(\"a\").ok == false\n    doAssert     patt(Lower).match(\"a\").ok\n    doAssert     patt(Lower).match(\"A\").ok == false\n    doAssert     patt(+Digit).match(\"12345\").ok\n    doAssert     patt(+Xdigit).match(\"deadbeef\").ok\n    doAssert     patt(+Graph).match(\" x\").ok == false\n\n  test \"Misc combos\":\n    doAssert     patt('a' | ('b' * 'c')).match(\"a\").ok\n    doAssert     patt('a' | ('b' * 'c') | ('d' * 'e' * 'f')).match(\"a\").ok\n    doAssert     patt('a' | ('b' * 'c') | ('d' * 'e' * 'f')).match(\"bc\").ok\n    doAssert     patt('a' | ('b' * 'c') | ('d' * 'e' * 'f')).match(\"def\").ok\n\n  test \"Compile time 1\":\n    proc doTest(): string {.compileTime.} =\n      var n: string\n      let p = peg \"number\":\n        number <- >+Digit:\n          n = $1\n      doAssert p.match(\"12345\").ok\n      return n\n    const v = doTest()\n    doAssert v == \"12345\"\n\n  test \"Compile time 2\":\n    static:\n      var n: string\n      let p = peg \"number\":\n        number <- >+Digit:\n          n = $1\n      doAssert p.match(\"12345\").ok\n      doAssert n == \"12345\"\n\n  test \"matchMax\":\n    let s = peg \"line\":\n      line   <- one | two\n      one    <- +Digit * 'c' * 'd' * 'f'\n      two    <- +Digit * 'b'\n    let r = s.match(\"1234cde\")\n    doAssert r.ok == false\n    doAssert r.matchLen == 4\n    doAssert r.matchMax == 6\n\n  test \"grammar1\":\n    let a = peg \"r1\":\n      r1 <- \"abc\"\n      r2 <- r1 * r1\n    doAssert a.match(\"abcabc\").ok\n\n  test \"grammar2\":\n    let a = peg \"r1\":\n  
    r2 <- r1 * r1\n      r1 <- \"abc\"\n    doAssert a.match(\"abcabc\").ok\n  \n  test \"backref\":\n    doAssert patt(R(\"sep\", Alpha) * *(1 - R(\"sep\")) * R(\"sep\") * !1).match(\"abbbba\").ok\n    doAssert patt(R(\"sep\", Alpha) * *(1 - R(\"sep\")) * R(\"sep\") * !1).match(\"abbbbc\").ok == false\n\n  test \"raise exception 1\":\n    let a = patt E\"boom\"\n    expect NPegParseError:\n      doAssert a.match(\"abcabc\").ok\n\n  test \"raise exception 2\":\n    let a = patt 4 * E\"boom\"\n    try:\n      doAssert a.match(\"abcabc\").ok\n    except NPegParseError as e:\n      doAssert e.matchLen == 4\n      doAssert e.matchMax == 4\n\n  test \"out of range capture exception 1\":\n    expect NPegCaptureOutOfRangeError:\n      let a = patt 1:\n        echo capture[10].s\n      doAssert a.match(\"c\").ok\n\n  test \"out of range capture exception 2\":\n    expect NPegCaptureOutOfRangeError:\n      let a = patt 1:\n        echo $9\n      doAssert a.match(\"c\").ok\n\n  test \"unknown backref error\":\n    expect NPegUnknownBackrefError:\n      discard patt(R(\"sep\", Alpha) * *(1 - R(\"sep\")) * R(\"sap\") * !1).match(\"abbbba\")\n\n  test \"user validation\":\n    let p = peg \"line\":\n      line <- uint8 * \",\" * uint8 * !1\n      uint8 <- >+Digit:\n        let v = parseInt($1)\n        validate(v>=0 and v<=255)\n    doAssert p.match(\"10,10\").ok\n    doAssert p.match(\"0,255\").ok\n    doAssert not p.match(\"10,300\").ok\n    doAssert not p.match(\"300,10\").ok\n\n  test \"user fail\":\n    let p = peg \"line\":\n      line <- 1:\n        fail()\n    doAssert not p.match(\"a\").ok\n\n  test \"templates\":\n    let p = peg \"a\":\n      list(patt, sep) <- patt * *(sep * patt)\n      commaList(patt) <- list(patt, \",\")\n      a <- commaList(>+Digit)\n    doAssert p.match(\"11,22,3\").captures == [\"11\",\"22\",\"3\"]\n\n  test \"templates with choices\":\n    let p = peg aap:\n      one() <- \"one\"\n      two() <- \"one\"\n      three() <- \"flip\" | 
\"flap\"\n      aap <- one() | two() | three()\n    doAssert p.match(\"onetwoflip\").ok\n\n"
  },
  {
    "path": "tests/captures.nim",
    "content": "import unittest\nimport npeg\nimport strutils\nimport json\n  \n{.push warning[Spacing]: off.}\n\n\nsuite \"captures\":\n\n  test \"no captures\":\n    doAssert    patt(1).match(\"a\").captures == @[]\n\n  test \"string captures\":\n    doAssert     patt(>1).match(\"ab\").captures == @[\"a\"]\n    doAssert     patt(>(>1)).match(\"ab\").captures == @[\"a\", \"a\"]\n    doAssert     patt(>1 * >1).match(\"ab\").captures == @[\"a\", \"b\"]\n    doAssert     patt(>(>1 * >1)).match(\"ab\").captures == @[\"ab\", \"a\", \"b\"]\n    doAssert     patt(>(>1 * >1)).match(\"ab\").captures == @[\"ab\", \"a\", \"b\"]\n\n  test \"code block captures\":\n    let p = peg \"foo\":\n      foo <- >1:\n        doAssert $1 == \"a\"\n        doAssert @1 == 0\n    doAssert p.match(\"a\").ok\n\n  test \"code block captures 2\":\n    let p = peg(\"foo\", v: string):\n      foo <- >1: v = $1\n    var a: string\n    doAssert p.match(\"a\", a).ok\n    doAssert a == \"a\"\n  \n  test \"code block captures 3\":\n    var a: string\n    let p = patt >1:\n        a = $1\n    doAssert p.match(\"a\").ok\n    doAssert a == \"a\"\n  \n  test \"code block captures 4\":\n    let p = peg \"foo\":\n      foo <- +Digit * >1:\n        doAssert $1 == \"a\"\n        doAssert @1 == 4\n    doAssert p.match(\"1234a\").ok\n\n  test \"code block captures with typed parser\":\n\n    type Thing = object\n      word: string\n      number: int\n\n    let s = peg(\"foo\", t: Thing):\n      foo <- word * number\n      word <- >+Alpha:\n        t.word = $1\n      number <- >+Digit:\n        t.number = parseInt($1)\n\n    var t = Thing()\n    doAssert s.match(\"foo123\", t).ok == true\n    doAssert t.word == \"foo\"\n    doAssert t.number == 123\n\n  when not defined(gcDestructors):\n    test \"Capture out of range\":\n      expect NPegException:\n        let p = peg \"l\":\n          l <- 1: echo $1\n        discard p.match(\"a\")\n\n  test \"push\":\n    let p = peg \"m\":\n      m <- >n * '+' * >n:\n      
  push $(parseInt($1) + parseInt($2))\n      n <- +Digit\n    let r = p.match(\"12+34\")\n    doAssert r.captures()[0] == \"46\"\n  \n  test \"nested\":\n    doAssert patt(>(>1 * >1)).match(\"ab\").captures == @[\"ab\", \"a\", \"b\"]\n\n  test \"nested codeblock\":\n    let p = peg foo:\n      foo <- >(>1 * b)\n      b <- >1: push $1\n    doAssert p.match(\"ab\").captures() == @[\"ab\", \"a\", \"b\"]\n\n  test \"clyybber\":\n    let p = peg \"m\":\n      m <- n * '+' * n:\n        push $(parseInt($1) + parseInt($2))\n      >n <- +Digit\n    let r = p.match(\"12+34\")\n    doAssert r.captures()[0] == \"46\"\n"
  },
  {
    "path": "tests/config.nims",
    "content": "switch(\"path\", \"$projectDir/../src\")\nswitch(\"hints\", \"off\")\n"
  },
  {
    "path": "tests/examples.nim",
    "content": "import unittest\nimport npeg\nimport json\nimport strutils\nimport math\nimport tables\nimport npeg/lib/uri\n\n{.push warning[Spacing]: off.}\n\n\nsuite \"examples\":\n\n  ######################################################################\n\n  test \"misc\":\n\n    let p1 = patt +{'a'..'z'}\n    doAssert p1.match(\"lowercaseword\").ok\n\n    let p2 = peg \"ident\":\n      lower <- {'a'..'z'}\n      ident <- +lower\n    doAssert p2.match(\"lowercaseword\").ok\n\n  ######################################################################\n\n  test \"shadowing\":\n    \n    let parser = peg \"line\":\n      line <- uri.URI\n      uri.scheme <- >uri.scheme\n      uri.host <- >uri.host\n      uri.port <- >+Digit\n      uri.path <- >uri.path\n    \n    let r = parser.match(\"http://nim-lang.org:8080/one/two/three\")\n    doAssert r.captures == @[\"http\", \"nim-lang.org\", \"8080\", \"/one/two/three\"]\n\n  ######################################################################\n\n  test \"matchFile\":\n\n    when defined(windows) or defined(posix):\n\n      let parser = peg \"pairs\":\n        pairs <- pair * *(',' * pair)\n        word <- +Alnum\n        number <- +Digit\n        pair <- (>word * '=' * >number)\n\n      let r = parser.matchFile \"tests/testdata\"\n      doAssert r.ok\n      doAssert r.captures == @[\"one\", \"1\", \"two\", \"2\", \"three\", \"3\", \"four\", \"4\"]\n\n  ######################################################################\n\n  test \"JSON parser\":\n\n    let json = \"\"\"\n      {\n          \"glossary\": {\n              \"title\": \"example glossary\",\n              \"GlossDiv\": {\n                  \"title\": \"S\",\n                  \"GlossList\": {\n                      \"GlossEntry\": {\n                          \"ID\": \"SGML\",\n                              \"SortAs\": \"SGML\",\n                              \"GlossTerm\": \"Standard Generalized Markup Language\",\n                              
\"Acronym\": \"SGML\",\n                              \"Abbrev\": \"ISO 8879:1986\",\n                              \"GlossDef\": {\n                              \"para\": \"A meta-markup language, used to create markup languages such as DocBook.\",\n                              \"GlossSeeAlso\": [\"GML\", \"XML\"]\n                          },\n                          \"GlossSee\": \"markup\"\n                      }\n                  }\n              }\n          }\n      }\n      \"\"\"\n\n    let s = peg \"doc\":\n      S              <- *Space\n      jtrue          <- \"true\"\n      jfalse         <- \"false\"\n      jnull          <- \"null\"\n\n      unicodeEscape  <- 'u' * Xdigit[4]\n      escape         <- '\\\\' * ({ '{', '\"', '|', '\\\\', 'b', 'f', 'n', 'r', 't' } | unicodeEscape)\n      stringBody     <- ?escape * *( +( {'\\x20'..'\\xff'} - {'\"'} - {'\\\\'}) * *escape) \n      jstring        <- ?S * '\"' * stringBody * '\"' * ?S\n\n      minus          <- '-'\n      intPart        <- '0' | (Digit-'0') * *Digit\n      fractPart      <- \".\" * +Digit\n      expPart        <- ( 'e' | 'E' ) * ?( '+' | '-' ) * +Digit\n      jnumber        <- ?minus * intPart * ?fractPart * ?expPart\n\n      doc            <- JSON * !1\n      JSON           <- ?S * ( jnumber | jobject | jarray | jstring | jtrue | jfalse | jnull ) * ?S\n      jobject        <- '{' * ( jstring * \":\" * JSON * *( \",\" * jstring * \":\" * JSON ) | ?S ) * \"}\"\n      jarray         <- \"[\" * ( JSON * *( \",\" * JSON ) | ?S ) * \"]\"\n\n    doAssert s.match(json).ok\n\n  ######################################################################\n\n  test \"HTTP with action captures to Nim object\":\n\n    type\n      Request = object\n        proto: string\n        version: string\n        code: int\n        message: string\n        headers: Table[string, string]\n\n    let s = peg(\"http\", userdata: Request):\n      space       <- ' '\n      crlf        <- '\\n' * ?'\\r'\n      url       
  <- +(Alpha | Digit | '/' | '_' | '.')\n      eof         <- !1\n      header_name <- +(Alpha | '-')\n      header_val  <- +(1-{'\\n'}-{'\\r'})\n      proto       <- >(+Alpha):\n        userdata.proto = $1\n      version     <- >(+Digit * '.' * +Digit):\n        userdata.version = $1\n      code        <- >+Digit:\n        userdata.code = parseInt($1)\n      msg         <- >(+(1 - '\\r' - '\\n')):\n        userdata.message = $1\n      header      <- >header_name * \": \" * >header_val:\n        userdata.headers[$1] = $2\n\n      response    <- proto * '/' * version * space * code * space * msg \n      headers     <- *(header * crlf)\n      http        <- response * crlf * headers * eof\n\n    let data = \"\"\"\nHTTP/1.1 301 Moved Permanently\nContent-Length: 162\nContent-Type: text/html\nLocation: https://nim.org/\n\"\"\"\n\n    var req: Request\n    let res = s.match(data, req)\n    doAssert res.ok\n    doAssert req.proto == \"HTTP\"\n    doAssert req.version == \"1.1\"\n    doAssert req.code == 301\n    doAssert req.message == \"Moved Permanently\"\n    doAssert req.headers[\"Content-Length\"] == \"162\"\n    doAssert req.headers[\"Content-Type\"] == \"text/html\"\n    doAssert req.headers[\"Location\"] == \"https://nim.org/\"\n\n  ######################################################################\n\n  test \"UTF-8\":\n\n    let b = \"  añyóng  ♜♞♝♛♚♝♞♜ оживлённым   \"\n\n    let m = peg \"s\":\n\n      cont <- {128..191}\n\n      utf8 <- {0..127} |\n              {194..223} * cont[1] |\n              {224..239} * cont[2] |\n              {240..244} * cont[3]\n\n      s <- *(@ > +(utf8-' '))\n\n    let r = m.match(b)\n    doAssert r.ok\n    let c = r.captures\n    doAssert c == @[\"añyóng\", \"♜♞♝♛♚♝♞♜\", \"оживлённым\"]\n\n  ######################################################################\n\n  test \"Back references\":\n\n    let p = peg \"doc\":\n      S <- *Space\n      doc <- +word * \"<<\" * R(\"sep\", sep) * S * >heredoc * R(\"sep\") * S * 
+word\n      word <- +Alpha * S\n      sep <- +Alpha\n      heredoc <- +(1 - R(\"sep\"))\n\n    let d = \"\"\"This is a <<EOT here document\n    with multiple lines EOT end\"\"\"\n\n    let r = p.match(d)\n    doAssert r.ok\n    doAssert r.captures[0] == \"here document\\n    with multiple lines \"\n\n  ######################################################################\n  \n  test \"RFC3986: Uniform Resource Identifier (URI): Generic Syntax\":\n\n    type Uri = object\n      scheme: string\n      userinfo: string\n      host: string\n      path: string\n      port: string\n      query: string\n      fragment: string\n\n    # The grammar below is a literal translation of the ABNF notation of the\n    # RFC. Optimizations can be made to limit backtracking, but this is a nice\n    # example how to create a parser from a RFC protocol description.\n\n    let p = peg(\"URI\", userdata: Uri):\n\n      URI <- scheme * \":\" * hier_part * ?( \"?\" * query) * ?( \"#\" * fragment) * !1\n\n      hier_part <- \"//\" * authority * path_abempty |\n                   path_absolute |\n                   path_rootless |\n                   path_empty\n\n      URI_reference <- uri | relative_ref\n\n      absolute_uri <- scheme * \":\" * hier_part * ?( \"?\" * query)\n\n      relative_ref <- relative_part * ?( \"?\" * query) * ?( \"#\" * fragment)\n\n      relative_part <- \"//\" * authority * path_abempty |\n                       path_absolute |\n                       path_noscheme |\n                       path_empty\n\n      scheme <- >(Alpha * *( Alpha | Digit | \"+\" | \"-\" | \".\" )): userdata.scheme = $1\n\n      authority <- ?(userinfo * \"@\") * host * ?( \":\" * port)\n      userinfo <- >*(unreserved | pct_encoded | sub_delims | \":\"):\n        userdata.userinfo = $1\n\n      host <- >(IP_literal | IPv4address | reg_name): userdata.host = $1\n      port <- >*Digit: userdata.port = $1\n\n      IP_literal <- \"[\" * (IPv6address | IPvFuture) * \"]\"\n\n      IPvFuture 
<- \"v\" * +Xdigit * \".\" * +(unreserved | sub_delims | \":\")\n\n      IPv6address <-                                     (h16 * \":\")[6] * ls32 |\n                                                  \"::\" * (h16 * \":\")[5] * ls32 |\n                   ?( h16                     ) * \"::\" * (h16 * \":\")[4] * ls32 |\n                   ?( h16 * (\":\" * h16)[0..1] ) * \"::\" * (h16 * \":\")[3] * ls32 |\n                   ?( h16 * (\":\" * h16)[0..2] ) * \"::\" * (h16 * \":\")[2] * ls32 |\n                   ?( h16 * (\":\" * h16)[0..3] ) * \"::\" * (h16 * \":\")    * ls32 |\n                   ?( h16 * (\":\" * h16)[0..4] ) * \"::\" *                  ls32 |\n                   ?( h16 * (\":\" * h16)[0..5] ) * \"::\" *                  h16  |\n                   ?( h16 * (\":\" * h16)[0..6] ) * \"::\"\n\n      h16 <- Xdigit[1..4]\n      ls32 <- (h16 * \":\" * h16) | IPv4address\n      IPv4address <- dec_octet * \".\" * dec_octet * \".\" * dec_octet * \".\" * dec_octet\n\n      dec_octet <- Digit                   | # 0-9\n                  {'1'..'9'} * Digit       | # 10-99\n                  \"1\" * Digit * Digit      | # 100-199\n                  \"2\" * {'0'..'4'} * Digit | # 200-249\n                  \"25\" * {'0'..'5'}          # 250-255\n\n      reg_name <- *(unreserved | pct_encoded | sub_delims)\n\n      path <- path_abempty  | # begins with \"/\" or is empty\n              path_absolute | # begins with \"/\" but not \"//\"\n              path_noscheme | # begins with a non-colon segment\n              path_rootless | # begins with a segment\n              path_empty      # zero characters\n\n      path_abempty  <- >(*( \"/\" * segment )): userdata.path = $1\n      path_absolute <- >(\"/\" * ?( segment_nz * *( \"/\" * segment ) )): userdata.path = $1\n      path_noscheme <- >(segment_nz_nc * *( \"/\" * segment )): userdata.path = $1\n      path_rootless <- >(segment_nz * *( \"/\" * segment )): userdata.path = $1\n      path_empty    <- 0\n\n      
segment       <- *pchar\n      segment_nz    <- +pchar\n      segment_nz_nc <- +( unreserved | pct_encoded | sub_delims | \"@\" )\n                    # non_zero_length segment without any colon \":\"\n\n      pchar         <- unreserved | pct_encoded | sub_delims | \":\" | \"@\"\n\n      query         <- >*( pchar | \"|\" | \"?\" ): userdata.query = $1\n\n      fragment      <- >*( pchar | \"|\" | \"?\" ): userdata.fragment = $1\n\n      pct_encoded   <- \"%\" * Xdigit * Xdigit\n\n      unreserved    <- Alpha | Digit | \"-\" | \".\" | \"_\" | \"~\"\n      reserved      <- gen_delims | sub_delims\n      gen_delims    <- \":\" | \"|\" | \"?\" | \"#\" | \"[\" | \"]\" | \"@\"\n      sub_delims    <- \"!\" | \"$\" | \"&\" | \"'\" | \"(\" | \")\" | \"*\" | \"+\" | \",\" | \";\" | \"=\"\n\n    let urls = @[\n      \"s3://somebucket/somefile.txt\",\n      \"scheme://user:pass@xn--mgbh0fb.xn--kgbechtv\",\n      \"scheme://user:pass@host:81/path?query#fragment\",\n      \"ScheMe://user:pass@HoSt:81/path?query#fragment\",\n      \"scheme://HoSt:81/path?query#fragment\",\n      \"scheme://@HoSt:81/path?query#fragment\",\n      \"scheme://user:pass@host/path?query#fragment\",\n      \"scheme://user:pass@host:/path?query#fragment\",\n      \"scheme://host/path?query#fragment\",\n      \"scheme://10.0.0.2/p?q#f\",\n      \"scheme://[vAF.1::2::3]/p?q#f\",\n      \"scheme:path?query#fragment\",\n      \"scheme:///path?query#fragment\",\n      \"scheme://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]?query#fragment\",\n      \"scheme:path#fragment\",\n      \"scheme:path?#fragment\",\n      \"ldap://[2001:db8::7]/c=GB?objectClass?one\",\n      \"http://example.org/hello:12?foo=bar#test\",\n      \"android-app://org.wikipedia/http/en.m.wikipedia.org/wiki/The_Hitchhiker%27s_Guide_to_the_Galaxy\",\n      \"ftp://:/p?q#f\",\n      \"scheme://user:pass@host:000000000081/path?query#fragment\",\n      \"scheme://user:pass@host:81/path?query#fragment\",\n      
\"ScheMe://user:pass@HoSt:81/path?query#fragment\",\n      \"scheme://HoSt:81/path?query#fragment\",\n      \"scheme://@HoSt:81/path?query#fragment\",\n      \"scheme://user:pass@host/path?query#fragment\",\n      \"scheme://user:pass@host:/path?query#fragment\",\n      \"scheme://user:pass@host/path?query#fragment\",\n      \"scheme://host/path?query#fragment\",\n      \"scheme://10.0.0.2/p?q#f\",\n      \"scheme:path?query#fragment\",\n      \"scheme:///path?query#fragment\",\n      \"scheme://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]?query#fragment\",\n      \"scheme:path#fragment\",\n      \"scheme:path?#fragment\",\n      \"tel:05000\",\n      \"scheme:path#\",\n      \"https://thephpleague.com./p?#f\",\n      \"http://a_.!~*\\'(-)n0123Di%25%26:pass;:&=+$,word@www.zend.com\",\n      \"http://\",\n      \"http:::/path\",\n      \"ldap://[2001:db8::7]/c=GB?objectClass?one\",\n      \"http://example.org/hello:12?foo=bar#test\",\n      \"android-app://org.wikipedia/http/en.m.wikipedia.org/wiki/The_Hitchhiker%27s_Guide_to_the_Galaxy\",\n      \"scheme://user:pass@xn--mgbh0fb.xn--kgbechtv\",\n      \"http://download.linuxjournal.com/pdf/get-doc.php?code=2c230d54e20e7cb595c660da48be7622&tcode=epub-301-\"\n    ]\n\n    for s in urls:\n      var uri: Uri\n      let r = p.match(s, uri)\n      if not r.ok:\n        echo s\n        quit 1\n"
  },
  {
    "path": "tests/lexparse.nim",
    "content": "import npeg, strutils, sequtils, unittest\n\ntype\n\n  Token* = enum\n    tInt\n    tAdd\n    cAddExpr\n\n  Node = ref object\n    case kind: Token\n    of tInt:\n      intVal: int\n    of tAdd:\n      discard\n    of cAddExpr:\n      l, r: Node\n\n  State = ref object\n    tokens: seq[Node]\n    stack: seq[Node]\n\n# Npeg uses `==` to check if a subject matches a literal\n\nproc `==`(n: Node, t: Token): bool = n.kind == t\n\nproc `$`(n: Node): string =\n  case n.kind\n    of tInt: return $n.intVal\n    of tAdd: return \"+\"\n    of cAddExpr: return \"(\" & $n.l & \" + \" & $n.r & \")\"\n\nlet lexer = peg(tokens, st: State):\n  s      <- *Space\n  tokens <- s * *(token * s)\n  token  <- int | add\n  int    <- +Digit:\n    st.tokens.add Node(kind: tInt, intVal: parseInt($0))\n  add    <- '+':\n    st.tokens.add Node(kind: tAdd)\n\nlet parser = peg(g, Node, st: State):\n  g   <- int * *add * !1\n  int <- [tInt]:\n    st.stack.add $0\n  add <- [tAdd] * int:\n    st.stack.add Node(kind: cAddExpr, r: st.stack.pop, l: st.stack.pop)\n\nsuite \"lexer/parser\":\n\n  test \"run\":\n\n    var st = State()\n    doAssert lexer.match(\"1 + 2 + 3\", st).ok\n    doAssert parser.match(st.tokens, st).ok\n    doAssert $st.stack[0] == \"((1 + 2) + 3)\"\n\n\n\n"
  },
  {
    "path": "tests/lib.nim",
    "content": "import unittest\nimport strutils\nimport unicode\nimport npeg\nimport npeg/lib/types\nimport npeg/lib/utf8\n\n{.push warning[Spacing]: off.}\n\n\nsuite \"unit tests\":\n\n  test \"types\":\n    doAssert     patt(types.uint8).match(\"0\").ok\n    doAssert     patt(types.uint8).match(\"255\").ok\n    doAssert not patt(types.uint8).match(\"256\").ok\n\n    doAssert     patt(types.int8).match(\"-128\").ok\n    doAssert     patt(types.int8).match(\"127\").ok\n    doAssert not patt(types.int8).match(\"-129\").ok\n    doAssert not patt(types.int8).match(\"128\").ok\n    \n    when defined(cpu64):\n      doAssert     patt(types.uint32).match(\"4294967295\").ok\n      doAssert not patt(types.uint32).match(\"4294967296\").ok\n\n\n  test \"utf8 runes\":\n    doAssert     patt(utf8.any[4] * !1).match(\"abcd\").ok\n    doAssert     patt(utf8.any[4] * !1).match(\"ａｂｃｄ\").ok\n    doAssert     patt(utf8.any[4] * !1).match(\"всех\").ok\n    doAssert     patt(utf8.any[4] * !1).match(\"乪乫乬乭\").ok\n\n  test \"utf8 character classes\":\n    doAssert     patt(utf8.upper).match(\"Ɵ\").ok\n    doAssert not patt(utf8.upper).match(\"ë\").ok\n    doAssert not patt(utf8.lower).match(\"Ɵ\").ok\n    doAssert     patt(utf8.lower).match(\"ë\").ok\n"
  },
  {
    "path": "tests/nimversion.nim",
    "content": "\nimport strutils\nimport npeg\n\ntype\n  NimType = enum Nim, NimSkull\n\n  Version = object\n    maj, min, rev: int\n    extra: string\n\n  NimVersion = object\n    typ: NimType\n    version: Version\n    os: string\n    cpu: string\n    date: string\n    git: string\n    boot_switches: seq[string]\n\n\nlet p = peg(\"nimversion\", nv: NimVersion):\n\n  S <- *{' ','\\t','\\n','\\r'}\n  nimversion <- oldnim_version | nimskull_version\n\n  oldnim_version <- header * S *\n                    \"Compiled at \" * date * S *\n                    \"Copyright (c) \" * +Graph * \" by Andreas Rumpf\" * S *\n                    \"git hash:\" * S * git * S * \n                    \"active boot switches:\" * S * boot_switches\n\n  nimskull_version <- header * S *\n                      \"Source hash: \" * git * S *\n                      \"Source date: \" * date\n\n  header <- typ * S * \"Compiler Version\" * S * version * S * \"[\" * os * \":\" * S * cpu * \"]\" * S\n\n  typ <- typ_nimskull | typ_nim\n  typ_nim <- \"Nim\": nv.typ = NimType.Nim\n  typ_nimskull <- \"Nimskull\": nv.typ = NimType.NimSkull\n\n  int <- +{'0'..'9'}\n  os <- >+Alnum: nv.os = $1\n  cpu <- >+Alnum: nv.cpu = $1\n  git <- >+{'0'..'9','a'..'f'}: nv.git = $1\n  boot_switches <- *(boot_switch * S)\n  boot_switch <- >+Graph: nv.boot_switches.add($1)\n  date <- >+{'0'..'9','-'}: nv.date = $1\n  version <- >int * \".\" * >int * \".\" * >int * ?\"-\" * >*Graph:\n    nv.version.maj = parseInt($1)\n    nv.version.min = parseInt($2)\n    nv.version.rev = parseInt($3)\n    nv.version.extra = $4\n\n\nlet vnim = \"\"\"Nim Compiler Version 2.1.1 [Linux: amd64]\nCompiled at 2024-03-01\nCopyright (c) 2006-2024 by Andreas Rumpf\n\ngit hash: 1e7ca2dc789eafccdb44304f7e42206c3702fc13\nactive boot switches: -d:release -d:danger\n\"\"\"\n\nlet vskull = \"\"\"Nimskull Compiler Version 0.1.0-dev.21234 [linux: amd64]\n\nSource hash: 4948ae809f7d84ef6d765111a7cd0c7cf2ae77d2\nSource date: 2024-02-18\n\"\"\"\n\nvar 
nv: NimVersion\n\nblock:\n  let r = p.match(vnim, nv)\n  if r.ok:\n    echo nv.repr\n\nblock:\n  let r = p.match(vskull, nv)\n  if r.ok:\n    echo nv.repr\n\n"
  },
  {
    "path": "tests/performance.nim",
    "content": "\nimport npeg\nimport os\nimport streams\nimport strutils\nimport tables\nimport json\nimport times\n#import packedjson\nimport osproc\n\nlet js = execProcess(\"bzip2 -d < tests/json-32M.bzip2\").string\n\nlet hostname = readFile(\"/etc/hostname\").strip()\n\nlet expectTime = {\n  \"platdoos\": { \n    \"json\": 0.651,\n    \"parsejson\": 3.962,\n    \"words\": 0.920,\n    \"search\": 0.057,\n    \"search1\": 0.231,\n    \"search2\": 1.419,\n    \"search3\": 0.292,\n  }.toTable(),\n  \"fe2\": { \n    \"json\": 3.975,\n    \"parsejson\": 8.739,\n    \"words\": 2.391,\n    \"search\": 0.373,\n    \"search1\": 2.014,\n    \"search2\": 2.871,\n    \"search3\": 0.771,\n  }.toTable(),\n}.toTable()\n\n\n# Wake up the governor a bit\n\nvar v = 0\nfor i in 1..100000:\n  for j in 1..1000000:\n    inc v\n\n\ntemplate measureTime*(what: string, code: untyped) =\n\n  var expect = 0.0\n  if hostname in expectTime:\n    if what in expectTime[hostname]:\n      expect = expectTime[hostname][what]\n\n  let start = cpuTime()\n  block:\n    code\n  let duration = cpuTime() - start\n  let perc = 100.0 * duration / expect\n  echo what & \": \", duration.formatFloat(ffDecimal, 3), \"s \", perc.formatFloat(ffDecimal, 1), \"%\"\n\n\nmeasureTime \"json\":\n\n  ## Json parsing with npeg\n\n  let p = peg JSON:\n    S              <- *{' ','\\t','\\r','\\n'}\n    True           <- \"true\"\n    False          <- \"false\"\n    Null           <- \"null\"\n\n    UnicodeEscape  <- 'u' * Xdigit[4]\n    Escape         <- '\\\\' * ({ '\"', '\\\\', '/', 'b', 'f', 'n', 'r', 't' } | UnicodeEscape)\n    StringBody     <- *Escape * *( +( {'\\x20'..'\\xff'} - {'\"'} - {'\\\\'}) * *Escape) \n    String         <- '\"' * StringBody * '\"':\n      discard\n\n    Minus          <- '-'\n    IntPart        <- '0' | {'1'..'9'} * *{'0'..'9'}\n    FractPart      <- \".\" * +{'0'..'9'}\n    ExpPart        <- ( 'e' | 'E' ) * ?( '+' | '-' ) * +{'0'..'9'}\n    Number         <- ?Minus * IntPart * 
?FractPart * ?ExpPart:\n      discard\n\n    DOC            <- Value * !1\n    ObjPair        <- S * String * S * \":\" * Value\n    Object         <- '{' * ( ObjPair * *( \",\" * ObjPair ) | S ) * \"}\"\n    Array          <- \"[\" * ( Value * *( \",\" * Value ) | S ) * \"]\"\n    Value          <- S * ( Number | String | Object | Array | True | False | Null ) * S\n\n    JSON           <- Value * !1\n\n  for i in 1..10:\n    doAssert p.match(js).ok\n\n\nlet s = newStringStream(js)\nmeasureTime \"parsejson\":\n  # JSon parsing with nims 'parsejson' module.\n  for i in 1..10:\n    s.setPosition(0)\n    var p: JsonParser\n    open(p, s, \"json\")\n    while true:\n      p.next()\n      if p.kind == jsonError or p.kind == jsonEof:\n        break\n\n\nmeasureTime \"words\":\n\n  var v = 0\n  let p = peg foo:\n    foo <- +word\n    word <- @>+Alpha:\n      inc v\n  discard p.match(js).ok\n\n\nmeasureTime \"search\":\n  # Search using built in search operator\n  var v = 0\n  let p = peg search:\n    search <- @\"CALIFORNIA\":\n      inc v\n  for i in 1..10:\n    discard p.match(js).ok\n\n\nmeasureTime \"search1\":\n  # Searches using tail recursion.\n  let p = peg SS:\n    SS <- +S\n    S <- \"CALIFORNIA\" | 1 * S\n  for i in 1..10:\n    discard p.match(js).ok\n\nmeasureTime \"search2\":\n  # Searches using an explicit\n  let p = peg SS:\n    SS <- +S\n    S <- *( !\"CALIFORNIA\" * 1) * \"CALIFORNIA\"\n  for i in 1..10:\n    discard p.match(js).ok\n\nmeasureTime \"search3\":\n   # using an optimization to skip false starts.\n  let p = peg SS:\n    SS <- +S\n    S <- \"CALIFORNIA\" | 1 * *(1-'C') * S\n  for i in 1..10:\n    discard p.match(js).ok\n\n"
  },
  {
    "path": "tests/precedence.nim",
    "content": "import unittest\nimport strutils\nimport math\nimport tables\nimport npeg\n\n{.push warning[Spacing]: off.}\n\n\nsuite \"precedence operator\":\n\n  # The PEG below implements a Pratt parser. The ^ and ^^ operators are used to\n  # implement precedence climbing, this allows rules to be left recursive while\n  # still avoiding unbound recursion.\n  #\n  # The parser local state `seq[int]` is used as a stack to store captures and\n  # intermediate results while parsing, the end result of the expression will\n  # be available in element 0 when the parser finishes\n\n  test \"expr evaluator\":\n\n    # Table of binary operators - this maps the operator string to a proc\n    # performing the operation:\n\n    template map(op: untyped): untyped = (proc(a, b: int): int = op(a, b))\n\n    var binOps = {\n      \"+\": map(`+`),\n      \"-\": map(`-`),\n      \"*\": map(`*`),\n      \"/\": map(`/%`),\n      \"^\": map(`^`),\n    }.toTable()\n\n    let p = peg(exp, st: seq[int]):\n\n      S <- *Space\n\n      # Capture a number and put it on the stack\n\n      number <- >+Digit * S:\n        st.add parseInt($1)\n\n      # Reset the precedence level to 0 when parsing sub-expressions\n      # in parentheses\n\n      parenExp <- ( \"(\" * exp * \")\" ) ^ 0\n\n      # Unary minues: take last element of the stack, negate and push back\n\n      uniMinus <- '-' * exp:\n        st.add(-st.pop)\n\n      # The prefix is a number, a sub expression in parentheses or the unary\n      # `-` operator.\n\n      prefix <- number | parenExp | uniMinus\n\n      # Parse an infix operator. Bounded by the precedece operator that makes\n      # sure `exp` is only parsed if the currrent precedence is lower then the\n      # given precedence. 
Note that the power operator has right assosiativity.\n\n      infix <- >{'+','-'}    * exp ^  1 |\n               >{'*','/'}    * exp ^  2 |\n               >{'^'}        * exp ^^ 3 :\n\n        # Takes two results off the stack, applies the operator and push\n        # back the result\n\n        let (f2, f1) = (st.pop, st.pop)\n        st.add binOps[$1](f1, f2)\n\n      # An expression consists of a prefix followed by zero or more infix\n      # operators\n\n      exp <- S * prefix * *infix\n\n\n    # Evaluate the given expression\n\n    proc eval(expr: string): int =\n      var st: seq[int]\n      doAssert p.match(expr, st).ok\n      st[0]\n\n\n    # Test cases\n\n    doAssert eval(\"2+1\") == 2+1\n    doAssert eval(\"(((2+(1))))\") == 2+1\n    doAssert eval(\"3+2\") == 3+2\n\n    doAssert eval(\"3+2+4\") == 3+2+4\n    doAssert eval(\"(3+2)+4\") == 3+2+4\n    doAssert eval(\"3+(2+4)\") == 3+2+4\n    doAssert eval(\"(3+2+4)\") == 3+2+4\n\n    doAssert eval(\"3*2*4\") == 3*2*4\n    doAssert eval(\"(3*2)*4\") == 3*2*4\n    doAssert eval(\"3*(2*4)\") == 3*2*4\n    doAssert eval(\"(3*2*4)\") == 3*2*4\n\n    doAssert eval(\"3-2-4\") == 3-2-4\n    doAssert eval(\"(3-2)-4\") == (3-2)-4\n    doAssert eval(\"3-(2-4)\") == 3-(2-4)\n    doAssert eval(\"(3-2-4)\") == 3-2-4\n\n    doAssert eval(\"3/8/4\") == 3/%8/%4\n    doAssert eval(\"(3/8)/4\") == (3/%8)/%4\n    doAssert eval(\"3/(8/4)\") == 3/%(8/%4)\n    doAssert eval(\"(3/8/4)\") == 3/%8/%4\n\n    doAssert eval(\"(3*8/4)\") == 3*8/%4\n    doAssert eval(\"(3/8*4)\") == 3/%8*4\n    doAssert eval(\"3*(8/4)\") == 3*(8/%4)\n\n    doAssert eval(\"4^3^2\") == 4^3^2\n    doAssert eval(\"(4^3)^2\") == (4^3)^2\n    doAssert eval(\"4^(3^2)\") == 4^(3^2)\n\n"
  },
  {
    "path": "tests/testdata",
    "content": "one=1,two=2,three=3,four=4\n"
  },
  {
    "path": "tests/tests.nim",
    "content": "include \"basics.nim\"\ninclude \"examples.nim\"\ninclude \"captures.nim\"\ninclude \"precedence.nim\"\ninclude \"lib.nim\"\ninclude \"lexparse.nim\"\n\n"
  }
]