[
  {
    "path": ".gitattributes",
    "content": "/tests export-ignore\n"
  },
  {
    "path": ".github/workflows/tests.yml",
    "content": "name: Tests\n\non:\n    push:\n        branches: ['master']\n    pull_request:\n\njobs:\n    tests:\n        name: PHP ${{ matrix.php }}\n        runs-on: ubuntu-latest\n\n        strategy:\n            fail-fast: false\n            matrix:\n                include:\n                    - php: '7.0'\n                      coverage: none\n                    - php: '7.1'\n                      coverage: none\n                    - php: '7.2'\n                      coverage: none\n                    - php: '7.3'\n                      coverage: none\n                    - php: '7.4'\n                      coverage: none\n                    - php: '8.0'\n                      coverage: none\n                    - php: '8.1'\n                      coverage: none\n                    - php: '8.2'\n                      coverage: xdebug\n                      upload_coverage: true\n                    - php: '8.3'\n                      coverage: none\n                    - php: '8.4'\n                      coverage: none\n                    - php: '8.5'\n                      coverage: none\n\n        steps:\n            - name: Checkout\n              uses: actions/checkout@v4\n              with:\n                  fetch-depth: 0\n\n            - name: Setup PHP\n              uses: shivammathur/setup-php@v2\n              with:\n                  php-version: ${{ matrix.php }}\n                  coverage: ${{ matrix.coverage }}\n\n            - name: Configure Composer (public deps)\n              run: |\n                  composer config -g github-protocols https\n                  composer config -g use-github-api false\n\n            - name: Composer version\n              run: composer --version\n\n            - name: Validate composer.json\n              run: composer validate --strict\n\n            - name: Install dependencies\n              run: composer update --prefer-dist\n\n            - name: Run PHPUnit (no coverage)\n              
if: ${{ !matrix.upload_coverage }}\n              run: vendor/bin/phpunit -c phpunit.xml --colors=always\n\n            - name: Run PHPUnit with coverage\n              if: ${{ matrix.upload_coverage }}\n              run: vendor/bin/phpunit -c phpunit.xml --colors=always --coverage-clover=coverage.xml\n\n            - name: Upload coverage to Codecov\n              if: ${{ matrix.upload_coverage }}\n              uses: codecov/codecov-action@v4\n              with:\n                  files: ./coverage.xml\n                  flags: all\n                  fail_ci_if_error: true\n              env:\n                  CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}\n"
  },
  {
    "path": ".gitignore",
    "content": "composer.lock\n.php_cs.cache\n.phpunit.result.cache\n/vendor/\n"
  },
  {
    "path": ".php_cs.dist",
    "content": "<?php\n\n$finder = PhpCsFixer\\Finder::create()\n    ->in(__DIR__)\n    ->exclude([])\n    ->files()->name('*.php')\n;\n\nreturn PhpCsFixer\\Config::create()\n    ->setRules([\n        '@Symfony' => true,\n        '@Symfony:risky' => true,\n        'concat_space' => ['spacing' => 'one'],\n        'array_syntax' => ['syntax' => 'short'],\n        'simplified_null_return' => false,\n        'phpdoc_align' => false,\n        'phpdoc_separation' => false,\n        'phpdoc_to_comment' => false,\n        'no_useless_else' => true,\n        'no_useless_return' => true,\n        'ordered_class_elements' => true,\n        'ordered_imports' => true,\n        'cast_spaces' => false,\n        'blank_line_after_opening_tag' => false,\n        'single_blank_line_before_namespace' => false,\n        'phpdoc_annotation_without_dot' => false,\n        'phpdoc_no_alias_tag' => false,\n        'space_after_semicolon' => false,\n    ])\n    ->setRiskyAllowed(true)\n    ->setFinder($finder)\n;\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2017 Petar Španja <petar@spanja.info>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# Query Translator\n\n[![Build Status](https://img.shields.io/github/actions/workflow/status/netgen/query-translator/tests.yml?branch=master&&style=flat-square)](https://github.com/netgen/query-translator/actions?query=workflow%3ATests)\n[![Code Coverage](https://img.shields.io/codecov/c/github/netgen/query-translator.svg?style=flat-square)](https://codecov.io/gh/netgen/query-translator)\n[![Downloads](https://img.shields.io/packagist/dt/netgen/query-translator.svg?style=flat-square)](https://packagist.org/packages/netgen/query-translator)\n[![Latest stable](https://img.shields.io/packagist/v/netgen/query-translator.svg?style=flat-square)](https://packagist.org/packages/netgen/query-translator)\n[![License](https://img.shields.io/packagist/l/netgen/query-translator.svg?style=flat-square)](https://packagist.org/packages/netgen/query-translator)\n[![PHP](https://img.shields.io/badge/php-%3E%3D%205.6-8892BF.svg?style=flat-square)](https://secure.php.net/)\n[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/netgen/query-translator)\n\nQuery Translator takes a search string as user input and converts it into something a search backend\ncan understand. Technically, it's a search query\n[translator](https://en.wikipedia.org/wiki/Translator_(computing)) with\n[abstract syntax tree](https://en.wikipedia.org/wiki/Abstract_syntax_tree) representation. From the\nproduced syntax tree, translation target can be anything you need. Usually it's a search backend,\nlike Solr and Elasticsearch, or a database abstraction layer.\n\nA set of interfaces for implementing a language processor is provided, with a single implemented\nlanguage named [Galach](lib/Languages/Galach). Galach implements a syntax that is based on what\nseems to be the unofficial standard for search query as user input. 
Quick cheat sheet:\n\n`word` `\"phrase\"` `(group)` `+mandatory` `-prohibited` `AND` `&&` `OR` `||` `NOT` `!` `#tag` `@user`\n`domain:term`\n\n### Error handling\n\nUser input means you have to expect errors and handle them gracefully. Because of that, the parser\nis completely resistant to errors. Syntax tree will contain detailed information about corrections\napplied to make sense of the user input. This can be useful to clean up the input or implement rich\ninput interface, with features like suggestions, syntax highlighting and error feedback.\n\n### Customization\n\nThe implementation was made with customization in mind. You can change the special characters which\nwill be used as part of the syntax, pick out elements of the language you want to use, implement\nyour own term clauses, or change how the syntax tree is converted to the target output.\n\n### Some use cases\n\n- User-level query language on top of your search backend\n- Common query language on top of different search backends\n- Control over options of the query language that is already provided by the search backend\n- Better error handling than provided by the search backend\n- Analysis and manipulation of the query before sending to the backend\n- Customized query language (while remaining within the base syntax)\n- Implementing rich input interface (with suggestions, syntax highlighting, error feedback)\n\nNote: This implementation is intended as a\n[library](https://en.wikipedia.org/wiki/Library_(computing)), meaning it doesn't try to solve\nspecific use cases for query translation. Instead, it's meant to be a base that you can use in\nimplementing such a use case.\n\n### How to use\n\nFirst add the library to your project:\n\n```\ncomposer require netgen/query-translator:^1.0\n```\n\nAfter that, make use of the features provided out of the box. If those are not enough, use extension\npoints to customize various parts of the translator to fit your needs. 
See\n[Galach documentation](lib/Languages/Galach) to find out more.\n\n## Run the demo\n\nDemo is available as a separate repository at [netgen/query-translator-demo](https://github.com/netgen/query-translator-demo).\n\nSteps for running the demo:\n\n1. Create the demo project using composer `composer create-project netgen/query-translator-demo`\n2. Position into the demo project directory `cd query-translator-demo`\n3. Start the web server with `src` as the document root `php -S localhost:8005 -t src`\n4. Open [http://localhost:8005](http://localhost:8005) in your browser ![Query Translator demo](https://raw.githubusercontent.com/netgen/query-translator-demo/master/src/animation.gif)\n"
  },
  {
    "path": "composer.json",
    "content": "{\n    \"name\": \"netgen/query-translator\",\n    \"description\": \"Query Translator is a search query translator with AST representation\",\n    \"keywords\": [\n        \"search\",\n        \"query\",\n        \"tokenizer\",\n        \"parser\",\n        \"generator\",\n        \"translator\",\n        \"ast\",\n        \"solr\",\n        \"edismax\",\n        \"elasticsearch\"\n    ],\n    \"type\": \"library\",\n    \"homepage\": \"https://github.com/netgen/query-translator\",\n    \"license\": \"MIT\",\n    \"authors\": [\n        {\n            \"name\": \"Petar Španja\",\n            \"email\": \"petar@spanja.info\"\n        }\n    ],\n    \"require\": {\n        \"php\": \"^7.0||^8.0\"\n    },\n    \"require-dev\": {\n        \"phpunit/phpunit\": \"<10\",\n        \"symfony/phpunit-bridge\": \"*\",\n        \"friendsofphp/php-cs-fixer\": \"^2.11\"\n    },\n    \"autoload\": {\n        \"psr-4\": {\n            \"QueryTranslator\\\\\": \"lib\"\n        }\n    },\n    \"autoload-dev\": {\n        \"psr-4\": {\n            \"QueryTranslator\\\\Tests\\\\\": \"tests\"\n        }\n    },\n    \"scripts\": {\n        \"test\": [\n            \"./vendor/bin/phpunit\"\n        ]\n    },\n    \"extra\": {\n        \"branch-alias\": {\n            \"dev-master\": \"1.0.x-dev\"\n        }\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Common/Aggregate.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Common;\n\nuse QueryTranslator\\Values\\Node;\nuse RuntimeException;\n\n/**\n * Common Aggregate Visitor implementation.\n */\nfinal class Aggregate extends Visitor\n{\n    /**\n     * @var \\QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor[]\n     */\n    private $visitors = [];\n\n    /**\n     * Construct from the optional array of $visitors.\n     *\n     * @param \\QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor[] $visitors\n     */\n    public function __construct(array $visitors = [])\n    {\n        foreach ($visitors as $visitor) {\n            $this->addVisitor($visitor);\n        }\n    }\n\n    /**\n     * Add a $visitor to the aggregated collection.\n     *\n     * @param \\QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor $visitor\n     */\n    public function addVisitor(Visitor $visitor)\n    {\n        $this->visitors[] = $visitor;\n    }\n\n    public function accept(Node $node)\n    {\n        return true;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        foreach ($this->visitors as $visitor) {\n            if ($visitor->accept($node)) {\n                return $visitor->visit($node, $this, $options);\n            }\n        }\n\n        throw new RuntimeException('No visitor available for ' . get_class($node));\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Common/Visitor.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Common;\n\nuse QueryTranslator\\Values\\Node;\n\n/**\n * Common base class for AST visitor implementations.\n */\nabstract class Visitor\n{\n    /**\n     * Check if visitor accepts the given $node.\n     *\n     * @param \\QueryTranslator\\Values\\Node $node\n     *\n     * @return bool\n     */\n    abstract public function accept(Node $node);\n\n    /**\n     * Visit the given $node.\n     *\n     * @param \\QueryTranslator\\Values\\Node $node\n     * @param \\QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor $subVisitor\n     * @param mixed $options\n     *\n     * @return string\n     */\n    abstract public function visit(Node $node, Visitor $subVisitor = null, $options = null);\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/ExtendedDisMax.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators;\n\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Values\\SyntaxTree;\n\n/**\n * ExtendedDisMax generator generates query string in Solr Extended DisMax query parser format.\n *\n * @link https://cwiki.apache.org/confluence/display/solr/The+Extended+DisMax+Query+Parser\n */\nfinal class ExtendedDisMax\n{\n    /**\n     * @var \\QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor\n     */\n    private $visitor;\n\n    public function __construct(Visitor $visitor)\n    {\n        $this->visitor = $visitor;\n    }\n\n    /**\n     * Generate query string in Solr Extended DisMax format from the given $syntaxTree.\n     *\n     * @param \\QueryTranslator\\Values\\SyntaxTree $syntaxTree\n     * @param mixed $options\n     *\n     * @return string\n     */\n    public function generate(SyntaxTree $syntaxTree, $options = null)\n    {\n        return $this->visitor->visit($syntaxTree->rootNode, null, $options);\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Lucene/Common/Group.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Group as GroupNode;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\GroupBegin;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * Group Node Visitor implementation.\n */\nfinal class Group extends Visitor\n{\n    /**\n     * Mapping of token domain to Solr field name.\n     *\n     * @var array\n     */\n    private $domainFieldMap = [];\n\n    /**\n     * Solr field name to be used when no mapping for a domain is found.\n     *\n     * @var string\n     */\n    private $defaultFieldName;\n\n    /**\n     * @param array|null $domainFieldMap\n     * @param string|null $defaultFieldName\n     */\n    public function __construct(array $domainFieldMap = null, $defaultFieldName = null)\n    {\n        if ($domainFieldMap !== null) {\n            $this->domainFieldMap = $domainFieldMap;\n        }\n\n        $this->defaultFieldName = $defaultFieldName;\n    }\n\n    public function accept(Node $node)\n    {\n        return $node instanceof GroupNode;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof GroupNode) {\n            throw new LogicException(\n                'Implementation accepts instance of Group Node'\n            );\n        }\n\n        if ($subVisitor === null) {\n            throw new LogicException('Implementation requires sub-visitor');\n        }\n\n        $clauses = [];\n\n        foreach ($node->nodes as $subNode) {\n            $clauses[] = $subVisitor->visit($subNode, $subVisitor, $options);\n        }\n\n        $fieldPrefix = $this->getSolrFieldPrefix($node->tokenLeft);\n        $clauses = implode(' ', $clauses);\n\n        return \"{$fieldPrefix}({$clauses})\";\n    }\n\n    /**\n     * Return Solr backend field name 
prefix for the given $token.\n     *\n     * @param \\QueryTranslator\\Languages\\Galach\\Values\\Token\\GroupBegin $token\n     *\n     * @return string\n     */\n    private function getSolrFieldPrefix(GroupBegin $token)\n    {\n        if ($token->domain === '') {\n            return '';\n        }\n\n        if (isset($this->domainFieldMap[$token->domain])) {\n            return $this->domainFieldMap[$token->domain] . ':';\n        }\n\n        return $this->defaultFieldName . ':';\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Lucene/Common/LogicalAnd.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalAnd as LogicalAndNode;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * LogicalAnd operator Node Visitor implementation.\n */\nfinal class LogicalAnd extends Visitor\n{\n    public function accept(Node $node)\n    {\n        return $node instanceof LogicalAndNode;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof LogicalAndNode) {\n            throw new LogicException(\n                'Implementation accepts instance of LogicalAnd Node'\n            );\n        }\n\n        if ($subVisitor === null) {\n            throw new LogicException('Implementation requires sub-visitor');\n        }\n\n        $clauses = [\n            $subVisitor->visit($node->leftOperand, $subVisitor, $options),\n            $subVisitor->visit($node->rightOperand, $subVisitor, $options),\n        ];\n\n        return implode(' AND ', $clauses);\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Lucene/Common/LogicalNot.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalNot as LogicalNotNode;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * LogicalNot operator Node Visitor implementation.\n */\nfinal class LogicalNot extends Visitor\n{\n    public function accept(Node $node)\n    {\n        return $node instanceof LogicalNotNode;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof LogicalNotNode) {\n            throw new LogicException(\n                'Implementation accepts instance of LogicalNot Node'\n            );\n        }\n\n        if ($subVisitor === null) {\n            throw new LogicException('Implementation requires sub-visitor');\n        }\n\n        $clause = $subVisitor->visit($node->operand, $subVisitor, $options);\n\n        return \"NOT {$clause}\";\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Lucene/Common/LogicalOr.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalOr as LogicalOrNode;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * LogicalOr operator Node Visitor implementation.\n */\nfinal class LogicalOr extends Visitor\n{\n    public function accept(Node $node)\n    {\n        return $node instanceof LogicalOrNode;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof LogicalOrNode) {\n            throw new LogicException(\n                'Implementation accepts instance of LogicalOr Node'\n            );\n        }\n\n        if ($subVisitor === null) {\n            throw new LogicException('Implementation requires sub-visitor');\n        }\n\n        $clauses = [\n            $subVisitor->visit($node->leftOperand, $subVisitor, $options),\n            $subVisitor->visit($node->rightOperand, $subVisitor, $options),\n        ];\n\n        return implode(' OR ', $clauses);\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Lucene/Common/Mandatory.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Mandatory as MandatoryNode;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * Mandatory operator Node Visitor implementation.\n */\nfinal class Mandatory extends Visitor\n{\n    public function accept(Node $node)\n    {\n        return $node instanceof MandatoryNode;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof MandatoryNode) {\n            throw new LogicException(\n                'Implementation accepts instance of Mandatory Node'\n            );\n        }\n\n        if ($subVisitor === null) {\n            throw new LogicException('Implementation requires sub-visitor');\n        }\n\n        $clause = $subVisitor->visit($node->operand, $subVisitor, $options);\n\n        return \"+{$clause}\";\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Lucene/Common/Phrase.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Term;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Phrase as PhraseToken;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * Phrase Node Visitor implementation.\n */\nfinal class Phrase extends Visitor\n{\n    /**\n     * Mapping of token domain to Solr field name.\n     *\n     * @var array\n     */\n    private $domainFieldMap = [];\n\n    /**\n     * Solr field name to be used when no mapping for a domain is found.\n     *\n     * @var string\n     */\n    private $defaultFieldName;\n\n    /**\n     * @param array|null $domainFieldMap\n     * @param string|null $defaultFieldName\n     */\n    public function __construct(array $domainFieldMap = null, $defaultFieldName = null)\n    {\n        if ($domainFieldMap !== null) {\n            $this->domainFieldMap = $domainFieldMap;\n        }\n\n        $this->defaultFieldName = $defaultFieldName;\n    }\n\n    public function accept(Node $node)\n    {\n        return $node instanceof Term && $node->token instanceof PhraseToken;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof Term) {\n            throw new LogicException(\n                'Implementation accepts instance of Term Node'\n            );\n        }\n\n        $token = $node->token;\n\n        if (!$token instanceof PhraseToken) {\n            throw new LogicException(\n                'Implementation accepts instance of Phrase Token'\n            );\n        }\n\n        $fieldPrefix = $this->getSolrFieldPrefix($token);\n        $phraseEscaped = preg_replace(\"/([\\\\{$token->quote}])/\", '\\\\\\\\$1', $token->phrase);\n\n        return \"{$fieldPrefix}\\\"{$phraseEscaped}\\\"\";\n    }\n\n    /**\n     * Return Solr backend 
field name prefix for the given $token.\n     *\n     * @param \\QueryTranslator\\Languages\\Galach\\Values\\Token\\Phrase $token\n     *\n     * @return string\n     */\n    private function getSolrFieldPrefix(PhraseToken $token)\n    {\n        if ($token->domain === '') {\n            return '';\n        }\n\n        if (isset($this->domainFieldMap[$token->domain])) {\n            return $this->domainFieldMap[$token->domain] . ':';\n        }\n\n        return $this->defaultFieldName . ':';\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Lucene/Common/Prohibited.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Prohibited as ProhibitedNode;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * Prohibited operator Node Visitor implementation.\n */\nfinal class Prohibited extends Visitor\n{\n    public function accept(Node $node)\n    {\n        return $node instanceof ProhibitedNode;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof ProhibitedNode) {\n            throw new LogicException(\n                'Implementation accepts instance of Prohibited Node'\n            );\n        }\n\n        if ($subVisitor === null) {\n            throw new LogicException('Implementation requires sub-visitor');\n        }\n\n        $clause = $subVisitor->visit($node->operand, $subVisitor, $options);\n\n        return \"-{$clause}\";\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Lucene/Common/Query.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Query as QueryNode;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * Query Node Visitor implementation.\n */\nfinal class Query extends Visitor\n{\n    public function accept(Node $node)\n    {\n        return $node instanceof QueryNode;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof QueryNode) {\n            throw new LogicException(\n                'Implementation accepts instance of Query Node'\n            );\n        }\n\n        if ($subVisitor === null) {\n            throw new LogicException('Implementation requires sub-visitor');\n        }\n\n        $clauses = [];\n\n        foreach ($node->nodes as $subNode) {\n            $clauses[] = $subVisitor->visit($subNode, $subVisitor, $options);\n        }\n\n        return implode(' ', $clauses);\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Lucene/Common/Tag.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Term;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Tag as TagToken;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * User Node Visitor implementation.\n */\nfinal class Tag extends Visitor\n{\n    /**\n     * @var string\n     */\n    private $fieldName;\n\n    /**\n     * @param string $fieldName\n     */\n    public function __construct($fieldName = null)\n    {\n        $this->fieldName = $fieldName;\n    }\n\n    public function accept(Node $node)\n    {\n        return $node instanceof Term && $node->token instanceof TagToken;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof Term) {\n            throw new LogicException(\n                'Implementation accepts instance of Term Node'\n            );\n        }\n\n        $token = $node->token;\n\n        if (!$token instanceof TagToken) {\n            throw new LogicException(\n                'Implementation accepts instance of Tag Token'\n            );\n        }\n\n        $fieldPrefix = $this->fieldName === null ? '' : \"{$this->fieldName}:\";\n\n        return \"{$fieldPrefix}{$token->tag}\";\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Lucene/Common/User.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Term;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\User as UserToken;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * User Node Visitor implementation.\n */\nfinal class User extends Visitor\n{\n    /**\n     * @var string\n     */\n    private $fieldName;\n\n    /**\n     * @param string $fieldName\n     */\n    public function __construct($fieldName = null)\n    {\n        $this->fieldName = $fieldName;\n    }\n\n    public function accept(Node $node)\n    {\n        return $node instanceof Term && $node->token instanceof UserToken;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof Term) {\n            throw new LogicException(\n                'Implementation accepts instance of Term Node'\n            );\n        }\n\n        $token = $node->token;\n\n        if (!$token instanceof UserToken) {\n            throw new LogicException(\n                'Implementation accepts instance of User Token'\n            );\n        }\n\n        $fieldPrefix = $this->fieldName === null ? '' : \"{$this->fieldName}:\";\n\n        return \"{$fieldPrefix}{$token->user}\";\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Lucene/Common/WordBase.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Term;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Word as WordToken;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * Base Word Node Visitor implementation.\n */\nabstract class WordBase extends Visitor\n{\n    /**\n     * Mapping of token domain to the backend field name.\n     *\n     * @var array\n     */\n    private $domainFieldMap = [];\n\n    /**\n     * Solr field name to be used when no mapping for a domain is found.\n     *\n     * @var string\n     */\n    private $defaultFieldName;\n\n    /**\n     * @param array|null $domainFieldMap\n     * @param string|null $defaultFieldName\n     */\n    public function __construct(array $domainFieldMap = null, $defaultFieldName = null)\n    {\n        if ($domainFieldMap !== null) {\n            $this->domainFieldMap = $domainFieldMap;\n        }\n\n        $this->defaultFieldName = $defaultFieldName;\n    }\n\n    public function accept(Node $node)\n    {\n        return $node instanceof Term && $node->token instanceof WordToken;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof Term) {\n            throw new LogicException(\n                'Implementation accepts instance of Term Node'\n            );\n        }\n\n        $token = $node->token;\n\n        if (!$token instanceof WordToken) {\n            throw new LogicException(\n                'Implementation accepts instance of Word Token'\n            );\n        }\n\n        $fieldPrefix = $this->getSolrFieldPrefix($token);\n        $wordEscaped = $this->escapeWord($token->word);\n\n        return \"{$fieldPrefix}{$wordEscaped}\";\n    }\n\n    /**\n     * Escape special characters in the given word $string.\n     *\n     * 
@param string $string\n     *\n     * @return string\n     */\n    abstract protected function escapeWord($string);\n\n    /**\n     * Return backend field name prefix for the given $token.\n     *\n     * @param \\QueryTranslator\\Languages\\Galach\\Values\\Token\\Word $token\n     *\n     * @return string\n     */\n    private function getSolrFieldPrefix(WordToken $token)\n    {\n        if ($token->domain === '') {\n            return '';\n        }\n\n        if (isset($this->domainFieldMap[$token->domain])) {\n            return $this->domainFieldMap[$token->domain] . ':';\n        }\n\n        return $this->defaultFieldName . ':';\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Lucene/ExtendedDisMax/Word.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\ExtendedDisMax;\n\nuse QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common\\WordBase;\n\n/**\n * Word Node Visitor implementation.\n */\nfinal class Word extends WordBase\n{\n    /**\n     * {@inheritdoc}\n     *\n     * @link http://lucene.apache.org/core/5_0_0/queryparser/org/apache/lucene/queryparser/classic/package-summary.html#Escaping_Special_Characters\n     *\n     * Note: additionally to what is defined above we also escape blank space.\n     */\n    protected function escapeWord($string)\n    {\n        return preg_replace(\n            '/(\\\\+|-|&&|\\\\|\\\\||!|\\\\(|\\\\)|\\\\{|}|\\\\[|]|\\\\^|\"|~|\\\\*|\\\\?|:|\\\\/|\\\\\\\\| )/',\n            '\\\\\\\\$1',\n            $string\n        );\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Lucene/QueryString/Word.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\QueryString;\n\nuse QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common\\WordBase;\n\n/**\n * Word Node Visitor implementation.\n */\nfinal class Word extends WordBase\n{\n    /**\n     * {@inheritdoc}\n     *\n     * @link http://lucene.apache.org/core/6_5_0/queryparser/org/apache/lucene/queryparser/classic/package-summary.html#Escaping_Special_Characters\n     *\n     * Note: additionally to what is defined above we also escape blank space.\n     */\n    protected function escapeWord($string)\n    {\n        return preg_replace(\n            '/(\\\\+|-|\\\\=|&&|\\\\|\\\\||\\\\>|\\\\<|!|\\\\(|\\\\)|\\\\{|}|\\\\[|]|\\\\^|\"|~|\\\\*|\\\\?|:|\\\\/|\\\\\\\\| )/',\n            '\\\\\\\\$1',\n            $string\n        );\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Native/BinaryOperator.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Native;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalAnd;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalOr as LogicalOrNode;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * BinaryOperator operator Node Visitor implementation.\n */\nfinal class BinaryOperator extends Visitor\n{\n    public function accept(Node $node)\n    {\n        return $node instanceof LogicalAnd || $node instanceof LogicalOrNode;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof LogicalAnd && !$node instanceof LogicalOrNode) {\n            throw new LogicException(\n                'Implementation accepts instance of LogicalAnd or LogicalOr Node'\n            );\n        }\n\n        if ($subVisitor === null) {\n            throw new LogicException('Implementation requires sub-visitor');\n        }\n\n        $clauses = [\n            $subVisitor->visit($node->leftOperand, $subVisitor, $options),\n            $subVisitor->visit($node->rightOperand, $subVisitor, $options),\n        ];\n\n        return implode(\" {$node->token->lexeme} \", $clauses);\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Native/Group.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Native;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Group as GroupNode;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * Group Node Visitor implementation.\n */\nfinal class Group extends Visitor\n{\n    public function accept(Node $node)\n    {\n        return $node instanceof GroupNode;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof GroupNode) {\n            throw new LogicException(\n                'Implementation accepts instance of Group Node'\n            );\n        }\n\n        if ($subVisitor === null) {\n            throw new LogicException('Implementation requires sub-visitor');\n        }\n\n        $clauses = [];\n\n        foreach ($node->nodes as $subNode) {\n            $clauses[] = $subVisitor->visit($subNode, $subVisitor, $options);\n        }\n\n        $clauses = implode(' ', $clauses);\n        $domainPrefix = $node->tokenLeft->domain === '' ? '' : \"{$node->tokenLeft->domain}:\";\n\n        return \"{$domainPrefix}{$node->tokenLeft->delimiter}{$clauses}{$node->tokenRight->lexeme}\";\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Native/Phrase.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Native;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Term;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Phrase as PhraseToken;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * Phrase Node Visitor implementation.\n */\nfinal class Phrase extends Visitor\n{\n    public function accept(Node $node)\n    {\n        return $node instanceof Term && $node->token instanceof PhraseToken;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof Term) {\n            throw new LogicException(\n                'Implementation accepts instance of Term Node'\n            );\n        }\n\n        $token = $node->token;\n\n        if (!$token instanceof PhraseToken) {\n            throw new LogicException(\n                'Implementation accepts instance of Phrase Token'\n            );\n        }\n\n        $domainPrefix = $token->domain === '' ? '' : \"{$token->domain}:\";\n        $phraseEscaped = preg_replace(\"/([\\\\{$token->quote}])/\", '\\\\\\\\$1', $token->phrase);\n\n        return \"{$domainPrefix}{$token->quote}{$phraseEscaped}{$token->quote}\";\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Native/Query.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Native;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Query as QueryNode;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * Query Node Visitor implementation.\n */\nfinal class Query extends Visitor\n{\n    public function accept(Node $node)\n    {\n        return $node instanceof QueryNode;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof QueryNode) {\n            throw new LogicException(\n                'Implementation accepts instance of Query Node'\n            );\n        }\n\n        if ($subVisitor === null) {\n            throw new LogicException('Implementation requires sub-visitor');\n        }\n\n        $clauses = [];\n\n        foreach ($node->nodes as $subNode) {\n            $clauses[] = $subVisitor->visit($subNode, $subVisitor, $options);\n        }\n\n        return implode(' ', $clauses);\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Native/Tag.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Native;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Term;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Tag as TagToken;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * User Node Visitor implementation.\n */\nfinal class Tag extends Visitor\n{\n    public function accept(Node $node)\n    {\n        return $node instanceof Term && $node->token instanceof TagToken;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof Term) {\n            throw new LogicException(\n                'Implementation accepts instance of Term Node'\n            );\n        }\n\n        $token = $node->token;\n\n        if (!$token instanceof TagToken) {\n            throw new LogicException(\n                'Implementation accepts instance of Tag Token'\n            );\n        }\n\n        return \"{$token->marker}{$token->tag}\";\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Native/UnaryOperator.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Native;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Tokenizer;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalNot;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Mandatory;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Prohibited;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * Unary operator Node Visitor implementation.\n */\nfinal class UnaryOperator extends Visitor\n{\n    public function accept(Node $node)\n    {\n        return $node instanceof Mandatory || $node instanceof Prohibited || $node instanceof LogicalNot;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof Mandatory && !$node instanceof Prohibited && !$node instanceof LogicalNot) {\n            throw new LogicException(\n                'Implementation accepts instance of Mandatory, Prohibited or LogicalNot Node'\n            );\n        }\n\n        if ($subVisitor === null) {\n            throw new LogicException('Implementation requires sub-visitor');\n        }\n\n        $clause = $subVisitor->visit($node->operand, $subVisitor, $options);\n\n        $padding = '';\n        if ($node->token->type === Tokenizer::TOKEN_LOGICAL_NOT) {\n            $padding = ' ';\n        }\n\n        return \"{$node->token->lexeme}{$padding}{$clause}\";\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Native/User.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Native;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Term;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\User as UserToken;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * User Node Visitor implementation.\n */\nfinal class User extends Visitor\n{\n    public function accept(Node $node)\n    {\n        return $node instanceof Term && $node->token instanceof UserToken;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof Term) {\n            throw new LogicException(\n                'Implementation accepts instance of Term Node'\n            );\n        }\n\n        $token = $node->token;\n\n        if (!$token instanceof UserToken) {\n            throw new LogicException(\n                'Implementation accepts instance of User Token'\n            );\n        }\n\n        return \"{$token->marker}{$token->user}\";\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Native/Word.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators\\Native;\n\nuse LogicException;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Term;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Word as WordToken;\nuse QueryTranslator\\Values\\Node;\n\n/**\n * Word Node Visitor implementation.\n */\nfinal class Word extends Visitor\n{\n    public function accept(Node $node)\n    {\n        return $node instanceof Term && $node->token instanceof WordToken;\n    }\n\n    public function visit(Node $node, Visitor $subVisitor = null, $options = null)\n    {\n        if (!$node instanceof Term) {\n            throw new LogicException(\n                'Implementation accepts instance of Term Node'\n            );\n        }\n\n        $token = $node->token;\n\n        if (!$token instanceof WordToken) {\n            throw new LogicException(\n                'Implementation accepts instance of Word Token'\n            );\n        }\n\n        $domainPrefix = $token->domain === '' ? '' : \"{$token->domain}:\";\n        $wordEscaped = preg_replace('/([\\\\\\'\"+\\-!():#@ ])/', '\\\\\\\\$1', $token->word);\n\n        return \"{$domainPrefix}{$wordEscaped}\";\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/Native.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators;\n\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Values\\SyntaxTree;\n\n/**\n * Native Galach generator generates query string in Galach format.\n */\nfinal class Native\n{\n    /**\n     * @var \\QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor\n     */\n    private $visitor;\n\n    public function __construct(Visitor $visitor)\n    {\n        $this->visitor = $visitor;\n    }\n\n    /**\n     * Generate query string in Galach format from the given $syntaxTree.\n     *\n     * @param \\QueryTranslator\\Values\\SyntaxTree $syntaxTree\n     *\n     * @return string\n     */\n    public function generate(SyntaxTree $syntaxTree)\n    {\n        return $this->visitor->visit($syntaxTree->rootNode);\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Generators/QueryString.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Generators;\n\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Values\\SyntaxTree;\n\n/**\n * QueryString generator generates query string in Elasticsearch Query String Query format.\n *\n * @link https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html\n */\nfinal class QueryString\n{\n    /**\n     * @var \\QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor\n     */\n    private $visitor;\n\n    public function __construct(Visitor $visitor)\n    {\n        $this->visitor = $visitor;\n    }\n\n    /**\n     * Generate query string in Elasticsearch Query String Query format from the given $syntaxTree.\n     *\n     * @param \\QueryTranslator\\Values\\SyntaxTree $syntaxTree\n     * @param mixed $options\n     *\n     * @return string\n     */\n    public function generate(SyntaxTree $syntaxTree, $options = null)\n    {\n        return $this->visitor->visit($syntaxTree->rootNode, null, $options);\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Parser.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach;\n\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Group;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalAnd;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalNot;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalOr;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Mandatory;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Prohibited;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Query;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Term;\nuse QueryTranslator\\Parsing;\nuse QueryTranslator\\Values\\Correction;\nuse QueryTranslator\\Values\\Node;\nuse QueryTranslator\\Values\\SyntaxTree;\nuse QueryTranslator\\Values\\Token;\nuse QueryTranslator\\Values\\TokenSequence;\nuse SplStack;\n\n/**\n * Galach implementation of the Parsing interface.\n */\nfinal class Parser implements Parsing\n{\n    /**\n     * Parser ignored adjacent unary operator preceding another operator.\n     */\n    const CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED = 0;\n\n    /**\n     * Parser ignored unary operator missing an operand.\n     */\n    const CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED = 1;\n\n    /**\n     * Parser ignored binary operator missing left side operand.\n     */\n    const CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED = 2;\n\n    /**\n     * Parser ignored binary operator missing right side operand.\n     */\n    const CORRECTION_BINARY_OPERATOR_MISSING_RIGHT_OPERAND_IGNORED = 3;\n\n    /**\n     * Parser ignored binary operator following another operator and connecting operators.\n     */\n    const CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED = 4;\n\n    /**\n     * Parser ignored logical not operators preceding mandatory or prohibited operator.\n     */\n    const CORRECTION_LOGICAL_NOT_OPERATORS_PRECEDING_PREFERENCE_IGNORED = 5;\n\n    /**\n     * Parser ignored empty group and 
connecting operators.\n     */\n    const CORRECTION_EMPTY_GROUP_IGNORED = 6;\n\n    /**\n     * Parser ignored unmatched left side group delimiter.\n     */\n    const CORRECTION_UNMATCHED_GROUP_LEFT_DELIMITER_IGNORED = 7;\n\n    /**\n     * Parser ignored unmatched right side group delimiter.\n     */\n    const CORRECTION_UNMATCHED_GROUP_RIGHT_DELIMITER_IGNORED = 8;\n\n    /**\n     * Parser ignored bailout type token.\n     *\n     * @see \\QueryTranslator\\Languages\\Galach\\Tokenizer::TOKEN_BAILOUT\n     */\n    const CORRECTION_BAILOUT_TOKEN_IGNORED = 9;\n\n    private static $tokenShortcuts = [\n        'operatorNot' => Tokenizer::TOKEN_LOGICAL_NOT | Tokenizer::TOKEN_LOGICAL_NOT_2,\n        'operatorPreference' => Tokenizer::TOKEN_MANDATORY | Tokenizer::TOKEN_PROHIBITED,\n        'operatorPrefix' => Tokenizer::TOKEN_MANDATORY | Tokenizer::TOKEN_PROHIBITED | Tokenizer::TOKEN_LOGICAL_NOT_2,\n        'operatorUnary' => Tokenizer::TOKEN_MANDATORY | Tokenizer::TOKEN_PROHIBITED | Tokenizer::TOKEN_LOGICAL_NOT | Tokenizer::TOKEN_LOGICAL_NOT_2,\n        'operatorBinary' => Tokenizer::TOKEN_LOGICAL_AND | Tokenizer::TOKEN_LOGICAL_OR,\n        'operator' => Tokenizer::TOKEN_LOGICAL_AND | Tokenizer::TOKEN_LOGICAL_OR | Tokenizer::TOKEN_MANDATORY | Tokenizer::TOKEN_PROHIBITED | Tokenizer::TOKEN_LOGICAL_NOT | Tokenizer::TOKEN_LOGICAL_NOT_2,\n        'groupDelimiter' => Tokenizer::TOKEN_GROUP_BEGIN | Tokenizer::TOKEN_GROUP_END,\n        'binaryOperatorAndWhitespace' => Tokenizer::TOKEN_LOGICAL_AND | Tokenizer::TOKEN_LOGICAL_OR | Tokenizer::TOKEN_WHITESPACE,\n    ];\n\n    private static $shifts = [\n        Tokenizer::TOKEN_WHITESPACE => 'shiftWhitespace',\n        Tokenizer::TOKEN_TERM => 'shiftTerm',\n        Tokenizer::TOKEN_GROUP_BEGIN => 'shiftGroupBegin',\n        Tokenizer::TOKEN_GROUP_END => 'shiftGroupEnd',\n        Tokenizer::TOKEN_LOGICAL_AND => 'shiftBinaryOperator',\n        Tokenizer::TOKEN_LOGICAL_OR => 'shiftBinaryOperator',\n        
Tokenizer::TOKEN_LOGICAL_NOT => 'shiftLogicalNot',\n        Tokenizer::TOKEN_LOGICAL_NOT_2 => 'shiftLogicalNot2',\n        Tokenizer::TOKEN_MANDATORY => 'shiftPreference',\n        Tokenizer::TOKEN_PROHIBITED => 'shiftPreference',\n        Tokenizer::TOKEN_BAILOUT => 'shiftBailout',\n    ];\n\n    private static $nodeToReductionGroup = [\n        Group::class => 'group',\n        LogicalAnd::class => 'logicalAnd',\n        LogicalOr::class => 'logicalOr',\n        LogicalNot::class => 'unaryOperator',\n        Mandatory::class => 'unaryOperator',\n        Prohibited::class => 'unaryOperator',\n        Term::class => 'term',\n    ];\n\n    private static $reductionGroups = [\n        'group' => [\n            'reduceGroup',\n            'reducePreference',\n            'reduceLogicalNot',\n            'reduceLogicalAnd',\n            'reduceLogicalOr',\n        ],\n        'unaryOperator' => [\n            'reduceLogicalNot',\n            'reduceLogicalAnd',\n            'reduceLogicalOr',\n        ],\n        'logicalOr' => [],\n        'logicalAnd' => [\n            'reduceLogicalOr',\n        ],\n        'term' => [\n            'reducePreference',\n            'reduceLogicalNot',\n            'reduceLogicalAnd',\n            'reduceLogicalOr',\n        ],\n    ];\n\n    /**\n     * Input tokens.\n     *\n     * @var \\QueryTranslator\\Values\\Token[]\n     */\n    private $tokens;\n\n    /**\n     * Query stack.\n     *\n     * @var \\SplStack\n     */\n    private $stack;\n\n    /**\n     * An array of applied corrections.\n     *\n     * @var \\QueryTranslator\\Values\\Correction[]\n     */\n    private $corrections = [];\n\n    public function parse(TokenSequence $tokenSequence)\n    {\n        $this->init($tokenSequence->tokens);\n\n        while (!empty($this->tokens)) {\n            $node = $this->shift();\n\n            if ($node instanceof Node) {\n                $this->reduce($node);\n            }\n        }\n\n        $this->reduceQuery();\n\n        
return new SyntaxTree($this->stack->top(), $tokenSequence, $this->corrections);\n    }\n\n    private function shift()\n    {\n        $token = array_shift($this->tokens);\n        $shift = self::$shifts[$token->type];\n\n        return $this->{$shift}($token);\n    }\n\n    private function reduce(Node $node)\n    {\n        $previousNode = null;\n        $reductionIndex = null;\n\n        while ($node instanceof Node) {\n            // Reset reduction index on first iteration or on Node change\n            if ($node !== $previousNode) {\n                $reductionIndex = 0;\n            }\n\n            // If there are no reductions to try, put the Node on the stack\n            // and continue shifting\n            $reduction = $this->getReduction($node, $reductionIndex);\n            if ($reduction === null) {\n                $this->stack->push($node);\n                break;\n            }\n\n            $previousNode = $node;\n            $node = $this->{$reduction}($node);\n            ++$reductionIndex;\n        }\n    }\n\n    protected function shiftWhitespace()\n    {\n        if ($this->isTopStackToken(self::$tokenShortcuts['operatorPrefix'])) {\n            $this->addCorrection(\n                self::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED,\n                $this->stack->pop()\n            );\n        }\n    }\n\n    protected function shiftPreference(Token $token)\n    {\n        return $this->shiftAdjacentUnaryOperator($token, self::$tokenShortcuts['operator']);\n    }\n\n    protected function shiftAdjacentUnaryOperator(Token $token, $tokenMask)\n    {\n        if ($this->isToken(reset($this->tokens), $tokenMask)) {\n            $this->addCorrection(\n                self::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED,\n                $token\n            );\n\n            return null;\n        }\n\n        $this->stack->push($token);\n    }\n\n    protected function shiftLogicalNot(Token $token)\n    {\n        
$this->stack->push($token);\n    }\n\n    protected function shiftLogicalNot2(Token $token)\n    {\n        $tokenMask = self::$tokenShortcuts['operator'] & ~Tokenizer::TOKEN_LOGICAL_NOT_2;\n\n        return $this->shiftAdjacentUnaryOperator($token, $tokenMask);\n    }\n\n    protected function shiftBinaryOperator(Token $token)\n    {\n        if ($this->stack->isEmpty() || $this->isTopStackToken(Tokenizer::TOKEN_GROUP_BEGIN)) {\n            $this->addCorrection(\n                self::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED,\n                $token\n            );\n\n            return null;\n        }\n\n        if ($this->isTopStackToken(self::$tokenShortcuts['operator'])) {\n            $this->ignoreBinaryOperatorFollowingOperator($token);\n\n            return null;\n        }\n\n        $this->stack->push($token);\n    }\n\n    private function ignoreBinaryOperatorFollowingOperator(Token $token)\n    {\n        $precedingOperators = $this->ignorePrecedingOperators(self::$tokenShortcuts['operator']);\n        $followingOperators = $this->ignoreFollowingOperators();\n\n        $this->addCorrection(\n            self::CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED,\n            ...array_merge(\n                $precedingOperators,\n                [$token],\n                $followingOperators\n            )\n        );\n    }\n\n    protected function shiftTerm(Token $token)\n    {\n        return new Term($token);\n    }\n\n    protected function shiftGroupBegin(Token $token)\n    {\n        $this->stack->push($token);\n    }\n\n    protected function shiftGroupEnd(Token $token)\n    {\n        $this->stack->push($token);\n\n        return new Group();\n    }\n\n    protected function shiftBailout(Token $token)\n    {\n        $this->addCorrection(self::CORRECTION_BAILOUT_TOKEN_IGNORED, $token);\n    }\n\n    protected function reducePreference(Node $node)\n    {\n        if 
(!$this->isTopStackToken(self::$tokenShortcuts['operatorPreference'])) {\n            return $node;\n        }\n\n        $token = $this->stack->pop();\n\n        if ($this->isToken($token, Tokenizer::TOKEN_MANDATORY)) {\n            return new Mandatory($node, $token);\n        }\n\n        return new Prohibited($node, $token);\n    }\n\n    protected function reduceLogicalNot(Node $node)\n    {\n        if (!$this->isTopStackToken(self::$tokenShortcuts['operatorNot'])) {\n            return $node;\n        }\n\n        if ($node instanceof Mandatory || $node instanceof Prohibited) {\n            $this->ignoreLogicalNotOperatorsPrecedingPreferenceOperator();\n\n            return $node;\n        }\n\n        return new LogicalNot($node, $this->stack->pop());\n    }\n\n    public function ignoreLogicalNotOperatorsPrecedingPreferenceOperator()\n    {\n        $precedingOperators = $this->ignorePrecedingOperators(self::$tokenShortcuts['operatorNot']);\n\n        if (!empty($precedingOperators)) {\n            $this->addCorrection(\n                self::CORRECTION_LOGICAL_NOT_OPERATORS_PRECEDING_PREFERENCE_IGNORED,\n                ...$precedingOperators\n            );\n        }\n    }\n\n    protected function reduceLogicalAnd(Node $node)\n    {\n        if ($this->stack->count() <= 1 || !$this->isTopStackToken(Tokenizer::TOKEN_LOGICAL_AND)) {\n            return $node;\n        }\n\n        $token = $this->stack->pop();\n        $leftOperand = $this->stack->pop();\n\n        return new LogicalAnd($leftOperand, $node, $token);\n    }\n\n    /**\n     * Reduce logical OR.\n     *\n     * @param \\QueryTranslator\\Values\\Node $node\n     * @param bool $inGroup Reduce inside a group\n     *\n     * @return null|\\QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalOr|\\QueryTranslator\\Values\\Node\n     */\n    protected function reduceLogicalOr(Node $node, $inGroup = false)\n    {\n        if ($this->stack->count() <= 1 || 
!$this->isTopStackToken(Tokenizer::TOKEN_LOGICAL_OR)) {\n            return $node;\n        }\n\n        // If inside a group don't look for following logical AND\n        if (!$inGroup) {\n            $this->popWhitespace();\n            // If the next token is logical AND, put the node on stack\n            // as that has precedence over logical OR\n            if ($this->isToken(reset($this->tokens), Tokenizer::TOKEN_LOGICAL_AND)) {\n                $this->stack->push($node);\n\n                return null;\n            }\n        }\n\n        $token = $this->stack->pop();\n        $leftOperand = $this->stack->pop();\n\n        return new LogicalOr($leftOperand, $node, $token);\n    }\n\n    protected function reduceGroup(Group $group)\n    {\n        $rightDelimiter = $this->stack->pop();\n\n        // Pop dangling tokens\n        $this->popTokens(~Tokenizer::TOKEN_GROUP_BEGIN);\n\n        if ($this->isTopStackToken(Tokenizer::TOKEN_GROUP_BEGIN)) {\n            $leftDelimiter = $this->stack->pop();\n            $this->ignoreEmptyGroup($leftDelimiter, $rightDelimiter);\n            $this->reduceRemainingLogicalOr(true);\n\n            return null;\n        }\n\n        $this->reduceRemainingLogicalOr(true);\n\n        $group->nodes = $this->collectTopStackNodes();\n        $group->tokenLeft = $this->stack->pop();\n        $group->tokenRight = $rightDelimiter;\n\n        return $group;\n    }\n\n    /**\n     * Collect all Nodes from the top of the stack.\n     *\n     * @return \\QueryTranslator\\Values\\Node[]\n     */\n    private function collectTopStackNodes()\n    {\n        $nodes = [];\n\n        while (!$this->stack->isEmpty() && $this->stack->top() instanceof Node) {\n            array_unshift($nodes, $this->stack->pop());\n        }\n\n        return $nodes;\n    }\n\n    private function ignoreEmptyGroup(Token $leftDelimiter, Token $rightDelimiter)\n    {\n        $precedingOperators = 
$this->ignorePrecedingOperators(self::$tokenShortcuts['operator']);\n        $followingOperators = $this->ignoreFollowingOperators();\n\n        $this->addCorrection(\n            self::CORRECTION_EMPTY_GROUP_IGNORED,\n            ...array_merge(\n                $precedingOperators,\n                [$leftDelimiter, $rightDelimiter],\n                $followingOperators\n            )\n        );\n    }\n\n    /**\n     * Initialize the parser with given array of $tokens.\n     *\n     * @param \\QueryTranslator\\Values\\Token[] $tokens\n     */\n    private function init(array $tokens)\n    {\n        $this->corrections = [];\n        $this->tokens = $tokens;\n        $this->cleanupGroupDelimiters($this->tokens);\n        $this->stack = new SplStack();\n    }\n\n    private function getReduction(Node $node, $reductionIndex)\n    {\n        $reductionGroup = self::$nodeToReductionGroup[get_class($node)];\n\n        if (isset(self::$reductionGroups[$reductionGroup][$reductionIndex])) {\n            return self::$reductionGroups[$reductionGroup][$reductionIndex];\n        }\n\n        return null;\n    }\n\n    private function reduceQuery()\n    {\n        $this->popTokens();\n        $this->reduceRemainingLogicalOr();\n        $nodes = [];\n\n        while (!$this->stack->isEmpty()) {\n            array_unshift($nodes, $this->stack->pop());\n        }\n\n        $this->stack->push(new Query($nodes));\n    }\n\n    /**\n     * Check if the given $token is an instance of Token.\n     *\n     * Optionally also checks given Token $typeMask.\n     *\n     * @param mixed $token\n     * @param int $typeMask\n     *\n     * @return bool\n     */\n    private function isToken($token, $typeMask = null)\n    {\n        if (!$token instanceof Token) {\n            return false;\n        }\n\n        if (null === $typeMask || $token->type & $typeMask) {\n            return true;\n        }\n\n        return false;\n    }\n\n    private function isTopStackToken($type = null)\n  
  {\n        return !$this->stack->isEmpty() && $this->isToken($this->stack->top(), $type);\n    }\n\n    /**\n     * Remove whitespace Tokens from the beginning of the token array.\n     */\n    private function popWhitespace()\n    {\n        while ($this->isToken(reset($this->tokens), Tokenizer::TOKEN_WHITESPACE)) {\n            array_shift($this->tokens);\n        }\n    }\n\n    /**\n     * Remove all Tokens from the top of the query stack and log Corrections as necessary.\n     *\n     * Optionally also checks that Token matches given $typeMask.\n     *\n     * @param int $typeMask\n     */\n    private function popTokens($typeMask = null)\n    {\n        while ($this->isTopStackToken($typeMask)) {\n            $token = $this->stack->pop();\n            if ($token->type & self::$tokenShortcuts['operatorUnary']) {\n                $this->addCorrection(\n                    self::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED,\n                    $token\n                );\n            } else {\n                $this->addCorrection(\n                    self::CORRECTION_BINARY_OPERATOR_MISSING_RIGHT_OPERAND_IGNORED,\n                    $token\n                );\n            }\n        }\n    }\n\n    private function ignorePrecedingOperators($type)\n    {\n        $tokens = [];\n        while ($this->isTopStackToken($type)) {\n            array_unshift($tokens, $this->stack->pop());\n        }\n\n        return $tokens;\n    }\n\n    private function ignoreFollowingOperators()\n    {\n        $tokenMask = self::$tokenShortcuts['binaryOperatorAndWhitespace'];\n        $tokens = [];\n        while ($this->isToken(reset($this->tokens), $tokenMask)) {\n            $token = array_shift($this->tokens);\n            if ($token->type & self::$tokenShortcuts['operatorBinary']) {\n                $tokens[] = $token;\n            }\n        }\n\n        return $tokens;\n    }\n\n    /**\n     * Reduce logical OR possibly remaining after reaching end of group or 
query.\n     *\n     * @param bool $inGroup Reduce inside a group\n     */\n    private function reduceRemainingLogicalOr($inGroup = false)\n    {\n        if (!$this->stack->isEmpty() && !$this->isTopStackToken()) {\n            $node = $this->reduceLogicalOr($this->stack->pop(), $inGroup);\n            $this->stack->push($node);\n        }\n    }\n\n    /**\n     * Clean up group delimiter tokens, removing unmatched left and right delimiter.\n     *\n     * Closest group delimiters will be matched first, unmatched remainder is removed.\n     *\n     * @param \\QueryTranslator\\Values\\Token[] $tokens\n     */\n    private function cleanupGroupDelimiters(array &$tokens)\n    {\n        $indexes = $this->getUnmatchedGroupDelimiterIndexes($tokens);\n\n        while (!empty($indexes)) {\n            $lastIndex = array_pop($indexes);\n            $token = $tokens[$lastIndex];\n            unset($tokens[$lastIndex]);\n\n            if ($token->type === Tokenizer::TOKEN_GROUP_BEGIN) {\n                $this->addCorrection(\n                    self::CORRECTION_UNMATCHED_GROUP_LEFT_DELIMITER_IGNORED,\n                    $token\n                );\n            } else {\n                $this->addCorrection(\n                    self::CORRECTION_UNMATCHED_GROUP_RIGHT_DELIMITER_IGNORED,\n                    $token\n                );\n            }\n        }\n    }\n\n    private function getUnmatchedGroupDelimiterIndexes(array &$tokens)\n    {\n        $trackLeft = [];\n        $trackRight = [];\n\n        foreach ($tokens as $index => $token) {\n            if (!$this->isToken($token, self::$tokenShortcuts['groupDelimiter'])) {\n                continue;\n            }\n\n            if ($this->isToken($token, Tokenizer::TOKEN_GROUP_BEGIN)) {\n                $trackLeft[] = $index;\n                continue;\n            }\n\n            if (empty($trackLeft)) {\n                $trackRight[] = $index;\n            } else {\n                array_pop($trackLeft);\n      
      }\n        }\n\n        return array_merge($trackLeft, $trackRight);\n    }\n\n    private function addCorrection($type, Token ...$tokens)\n    {\n        $this->corrections[] = new Correction($type, ...$tokens);\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/README.md",
    "content": "# Galach query language\n\nTo better understand parts of the language processor described below, run the demo:\n\n1. Create the demo project using composer `composer create-project netgen/query-translator-demo`\n2. Position into the demo project directory `cd query-translator-demo`\n3. Start the web server with `src` as the document root `php -S localhost:8005 -t src`\n4. Open [http://localhost:8005](http://localhost:8005) in your browser\n\nThe demo will present behavior of Query Translator in an interactive way.\n\n### Syntax\n\nGalach is based on a syntax that seems to be the unofficial standard for search query as user input.\nIt should feel familiar, as the same basic syntax is used by any popular text-based search engine\nout there. It is also very similar to\n[Lucene Query Parser syntax](https://lucene.apache.org/core/2_9_4/queryparsersyntax.html), used by\nboth Solr and Elasticsearch.\n\nRead about it more detail in the [syntax documentation](SYNTAX.md), here we'll only show a quick\ncheat sheet:\n\n`word` `\"phrase\"` `(group)` `+mandatory` `-prohibited` `AND` `&&` `OR` `||` `NOT` `!` `#tag` `@user`\n`domain:term`\n\nAnd an example:\n\n```\ncheese AND (bacon OR eggs) +type:breakfast\n```\n\n### How it works\n\nThe implementation has some of the usual language processor phases, starting with the lexical\nanalysis in [Tokenizer](Tokenizer.php), followed by the syntax analysis in [Parser](Parser.php), and\nending with the target code generation in a [Generator](Generators). The output of the Parser is a\nhierarchical tree structure. It represents the syntax of the query in an abstract way and is easy to\nprocess using [tree traversal](https://en.wikipedia.org/wiki/Tree_traversal). From that syntax tree,\na target output is generated.\n\nWhen broken into parts, we have a sequence like this:\n\n1. User writes a query string\n2. Query string is given to Tokenizer which produces an instance of\n[TokenSequence](../../Values/TokenSequence.php)\n3. 
TokenSequence instance is given to Parser which produces an instance of\n[SyntaxTree](../../Values/SyntaxTree.php)\n4. SyntaxTree instance is given to the Generator to produce a target output\n5. Target output is passed to its consumer\n\nHere's how that would look in code:\n\n```php\nuse QueryTranslator\\Languages\\Galach\\Tokenizer;\nuse QueryTranslator\\Languages\\Galach\\TokenExtractor\\Full as FullTokenExtractor;\nuse QueryTranslator\\Languages\\Galach\\Parser;\nuse QueryTranslator\\Languages\\Galach\\Generators;\n\n// 1. User writes a query string\n\n$queryString = $_GET['query_string'];\n\n// This is the place where you would perform some sanity checks that are out of the scope\n// of this library, for example, checking the length of the query string\n\n// 2. Query string is given to Tokenizer which produces an instance of TokenSequence\n\n// Note that Tokenizer needs a TokenExtractor, which is an extension point\n// Here we use Full TokenExtractor which provides full Galach syntax\n\n$tokenExtractor = new FullTokenExtractor();\n$tokenizer = new Tokenizer($tokenExtractor);\n$tokenSequence = $tokenizer->tokenize($queryString);\n\n// 3. TokenSequence instance is given to Parser which produces an instance of SyntaxTree\n\n$parser = new Parser();\n$syntaxTree = $parser->parse($tokenSequence);\n\n// If needed, here you can access corrections\n\nforeach ($syntaxTree->corrections as $correction) {\n    echo $correction->type;\n}\n \n// 4. 
Now we can build a generator, in this example an ExtendedDisMax generator to target\n//    Solr's Extended DisMax Query Parser\n\n// This part is a little bit more involving since we need to build all visitors for different\n// Nodes in the syntax tree\n\n$generator = new Generators\\ExtendedDisMax(\n    new Generators\\Common\\Aggregate([\n        new Generators\\Lucene\\Common\\BinaryOperator(),\n        new Generators\\Lucene\\Common\\Group(),\n        new Generators\\Lucene\\Common\\Phrase(),\n        new Generators\\Lucene\\Common\\Query(),\n        new Generators\\Lucene\\Common\\Tag(),\n        new Generators\\Lucene\\Common\\UnaryOperator(),\n        new Generators\\Lucene\\Common\\User(),\n        new Generators\\Lucene\\ExtendedDisMax\\Word(),\n    ])\n);\n\n// Now we can use the generator to generate the target output\n\n$targetString = $generator->generate($syntaxTree);\n\n// Finally we can send the generated string to Solr\n\n$result = $solrClient->search($targetString);\n```\n\n### Error handling\n\nNo input is considered invalid. Both Tokenizer and Parser are made to be resistant to errors and\nwill try to process anything you throw at them. When input does contain an error, a correction will\nbe applied. This will be repeated as necessary. The corrections are applied during parsing and are\nmade available in the SyntaxTree as an array of [Correction](../../Values/Correction.php) instances.\nThey will contain information about the type of the correction and the tokens affected by it.\n\nOne type of correction starts in the Tokenizer. When no [Token](../../Values/Token.php) can be\nextracted at a current position in the input string, a single character will be read as a special\n`Tokenizer::TOKEN_BAILOUT` type Token. All Tokens of that type will be ignored by the parser. 
The\nonly known case where this can happen is the occurrence of an unclosed phrase delimiter `\"`.\n\nNote that, while applying the corrections, the best efforts are made to preserve the intended\nmeaning of the query. The following is a list of corrections, with correction type constant and an\nexample of an incorrect input and a corrected result.\n\n1. Adjacent unary operator preceding another operator is ignored\n\n    `Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED`\n\n    ```\n    ++one +-two\n    ```\n    ```\n    +one -two\n    ```\n\n2. Unary operator missing an operand is ignored\n\n    `Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED`\n\n    ```\n    one NOT\n    ```\n    ```\n    one\n    ```\n\n3. Binary operator missing left side operand is ignored\n\n    `Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED`\n\n    ```\n    AND two\n    ```\n    ```\n    two\n    ```\n\n4. Binary operator missing right side operand is ignored\n\n    `Parser::CORRECTION_BINARY_OPERATOR_MISSING_RIGHT_OPERAND_IGNORED`\n\n    ```\n    one AND\n    ```\n    ```\n    one\n    ```\n\n5. Binary operator following another operator is ignored together with connecting operators\n\n    `Parser::CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED`\n\n    ```\n    one AND OR AND two\n    ```\n    ```\n    one two\n    ```\n\n6. Logical not operators preceding mandatory or prohibited operator are ignored\n\n    `Parser::CORRECTION_LOGICAL_NOT_OPERATORS_PRECEDING_PREFERENCE_IGNORED`\n\n    ```\n    NOT +one NOT -two\n    ```\n    ```\n    +one -two\n    ```\n\n7. Empty group is ignored together with connecting operators\n\n    `Parser::CORRECTION_EMPTY_GROUP_IGNORED`\n\n    ```\n    one AND () OR two\n    ```\n    ```\n    one two\n    ```\n\n8. Unmatched left side group delimiter is ignored\n\n    `Parser::CORRECTION_UNMATCHED_GROUP_LEFT_DELIMITER_IGNORED`\n\n    ```\n    one ( AND two\n    ```\n    ```\n    one AND two\n    ```\n\n9. 
Unmatched right side group delimiter is ignored\n\n    `Parser::CORRECTION_UNMATCHED_GROUP_RIGHT_DELIMITER_IGNORED`\n\n    ```\n    one AND ) two\n    ```\n    ```\n    one AND two\n    ```\n\n10. Any Token of `Tokenizer::TOKEN_BAILOUT` type is ignored\n\n    `Parser::CORRECTION_BAILOUT_TOKEN_IGNORED`\n\n    ```\n    one \" two\n    ```\n    ```\n    one two\n    ```\n\n### Customization\n\nYou can modify the Galach language in a limited way:\n\n- By changing special characters and sequences of characters used as part of the language syntax:\n    - operators: `AND` `&&` `OR` `||` `NOT` `!` `+` `-`\n    - grouping and phrase delimiters: `(` `)` `\"`\n    - user and tag markers: `@` `#`\n    - domain prefix: `domain:`\n- By choosing parts of the language that you want to use. You might want to use only a subset of the\n  full syntax, maybe without the grouping feature, using only `+` and `-` operators, disabling\n  domains, and so on.\n- By implementing custom `Tokenizer::TOKEN_TERM` type token. Read more on that in the text below.\n\nCustomization happens during the lexical analysis. The Tokenizer is actually marked as `final` and\nis not intended for extending. You will need to implement your own\n[TokenExtractor](TokenExtractor.php), a dependency to the Tokenizer. TokenExtractor controls the\nsyntax through regular expressions used to recognize the [Token](../../Values/Token.php), which is a\nsequence of characters forming the smallest syntactic unit of the language. The following is a list\nof supported Token types, together with their `Tokenizer::TOKEN_*` constants and an example:\n\n1. Term token – represents a category of term type tokens.\n\n    Note that [Word](Values/Token/Word.php) and [Phrase](Values/Token/Phrase.php) term tokens can\n    have domain prefix. 
This can't be used on [User](Values/Token/User.php) and\n    [Tag](Values/Token/Tag.php) term tokens, because those define implicit domains of their own.\n\n    `Tokenizer::TOKEN_TERM`\n\n    ```\n    word\n    ```\n    ```\n    title:word\n    ```\n    ```\n    \"this is a phrase\"\n    ```\n    ```\n    body:\"this is a phrase\"\n    ```\n    ```\n    @user\n    ```\n    ```\n    #tag\n    ```\n\n2. Whitespace token - represents the whitespace in the input string.\n\n    `Tokenizer::TOKEN_WHITESPACE`\n\n    ```\n    one two\n       ^\n    ```\n\n3. Logical AND token - combines two adjoining elements with logical AND.\n\n    `Tokenizer::TOKEN_LOGICAL_AND`\n\n    ```\n    one AND two\n        ^^^\n    ```\n\n4. Logical OR token - combines two adjoining elements with logical OR.\n\n    `Tokenizer::TOKEN_LOGICAL_OR`\n\n    ```\n    one OR two\n        ^^\n    ```\n\n5. Logical NOT token - applies logical NOT to the next (right-side) element.\n\n    `Tokenizer::TOKEN_LOGICAL_NOT`\n\n    ```\n    NOT one\n    ^^^\n    ```\n\n6. Shorthand logical NOT token - applies logical NOT to the next (right-side) element.\n\n    This is an alternative to the `Tokenizer::TOKEN_LOGICAL_NOT` above, with the difference that\n    parser will expect it's placed next (left) to the element it applies to, without the whitespace\n    in between.\n\n    `Tokenizer::TOKEN_LOGICAL_NOT_2`\n\n    ```\n    !one\n    ^\n    ```\n\n7. Mandatory operator - applies mandatory inclusion to the next (right side) element.\n\n    `Tokenizer::TOKEN_MANDATORY`\n\n    ```\n    +one\n    ^\n    ```\n\n8. Prohibited operator - applies mandatory exclusion to the next (right side) element.\n\n    `Tokenizer::TOKEN_PROHIBITED`\n\n    ```\n    -one\n    ^\n    ```\n\n9. Left side delimiter of a group.\n\n    Note that the left side group delimiter can have domain prefix.\n\n    `Tokenizer::TOKEN_GROUP_BEGIN`\n\n    ```\n    (one AND two)\n    ^\n    ```\n    ```\n    text:(one AND two)\n    ^^^^^^\n    ```\n\n10. 
Right side delimiter of a group.\n\n    `Tokenizer::TOKEN_GROUP_END`\n\n    ```\n    (one AND two)\n                ^\n    ```\n\n11. Bailout token.\n\n    `Tokenizer::TOKEN_BAILOUT`\n\n    ```\n    not exactly a phrase\"\n                        ^\n    ```\n\nBy changing the regular expressions, you can change how tokens are recognized, including special\ncharacters used as part of the language syntax. You can also omit regular expressions for some token\ntypes. Through that, you can control which elements of the language you want to use. There are two\nabstract methods to implement when extending the base [TokenExtractor](TokenExtractor.php):\n\n- `getExpressionTypeMap(): array`\n\n    Here you must return a map of regular expressions to corresponding Token types. Token type\n    can be one of the predefined constants `Tokenizer::TOKEN_*`.\n\n- `createTermToken($position, array $data): Token`\n\n    Here you receive Token data extracted through regular expression matching and a position where\n    the data was extracted at. From that, you must return the corresponding Token instance of the\n    `Tokenizer::TOKEN_TERM` type.\n\n    If needed, here you can return an instance of your own Token subtype. You can use regular\n    expressions with named capturing groups to extract meaning from the input string and pass it to\n    the constructor method.\n\nOptionally you can override the `createGroupBeginToken()` method. This is useful if you want to\ncustomize token of the `Tokenizer::TOKEN_GROUP_BEGIN` type:\n\n- `createGroupBeginToken($position, array $data): Token`\n\n    Here you receive Token data extracted through regular expression matching and a position where\n    the data was extracted at. From that, you must return the corresponding Token instance of the\n    `Tokenizer::TOKEN_GROUP_BEGIN` type.\n\n    If needed, here you can return an instance of your own Token subtype. 
You can use regular\n    expressions with named capturing groups to extract meaning from the input string and pass it to\n    the constructor method.\n\nTwo TokenExtractor implementations are provided out of the box. You can use them as an example and a\nstarting point to implement your own. These are:\n\n- [Full](TokenExtractor/Full.php) TokenExtractor, supports full syntax of the language\n- [Text](TokenExtractor/Text.php) TokenExtractor, supports text related subset of the language\n\n#### Parser\n\nThe Parser is the core of the library. It's marked as `final` and is not intended for extending.\nMethod `Parser::parse()` accepts TokenSequence, but it only cares about the type of the Token, so it\nwill be oblivious to any customizations you might do in the Tokenizer. That includes both\nrecognizing only a subset of the full syntax and the custom `Tokenizer::TOKEN_TERM` type tokens.\nWhile it's possible to implement a custom Parser, at that point you should consider calling it a new\nlanguage rather than a customization of Galach.\n\n### Generators\n\nA generator is used to generate the target output from the SyntaxTree. Three different ones are\nprovided out of the box:\n\n1. [Native](Generators/Native.php)\n\n   `Native` generator produces query string in the Galach format. This is mostly useful as an\n   example and for the cleanup of the user input. In case the corrections were applied to the input,\n   the output will be corrected. Also, it will not contain any superfluous whitespace and special\n   characters will be explicitly escaped.\n\n2. [ExtendedDisMax](Generators/ExtendedDisMax.php)\n\n   Output of `ExtendedDisMax` generator is intended for the `q` parameter of the\n   [Solr Extended DisMax Query Parser](https://cwiki.apache.org/confluence/display/solr/The+Extended+DisMax+Query+Parser).\n\n3. 
[QueryString](Generators/QueryString.php)\n\n   Output of `QueryString` generator is intended for the `query` parameter of the\n   [Elasticsearch Query String Query](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html).\n\nAll generators use the same hierarchical [Visitor](Generators/Common/Visitor.php) pattern. Each\nconcrete [Node](../../Values/Node.php) instance has its own visitor, dispatched by checking on the\nclass it implements. This enables customization per Node visitor. Since Term Node can cover\ndifferent Term tokens (including your custom ones), Term visitors should be dispatched both by the\nNode instance and the type of Token it aggregates. The visit method also propagates optional\n`$options` parameter. If needed, it can be used to control the behavior of the generator from the\noutside.\n\nThis approach should be useful for most custom implementations.\n\nNote that the Generator interface is not provided. That is because the generator's output can't be\nassumed, because it's specific to the intended target. The main job of the Query Translator is\nproducing the syntax tree from which it's easy to generate anything you might need. Following from\nthat - if the provided generators don't meet your needs, feel free to customize them or implement\nyour own.\n"
  },
  {
    "path": "lib/Languages/Galach/SYNTAX.md",
    "content": "# Galach query language syntax\n\n## Terms\n\n1. `Word` term is a string not containing whitespace, unless that whitespace is escaped.\n\n    ```\n    word\n    ```\n    ```\n    another\\ word\n    ```\n\n2. `Phrase` term is formed by enclosing words within double quotation marks `\"`.\n\n    ```\n    \"reality exists\"\n    ```\n    ```\n    \"what's not real doesn't exist\"\n    ```\n\n3. `User` term is defined by the leading `@` character, followed by at least one alphanumeric or\n    underscore character, followed by an arbitrary sequence of alphanumeric characters, hyphens,\n    underscores, and dots.\n\n    Regular expression:\n\n    ```\n    @[a-zA-Z0-9_][a-zA-Z0-9_\\-.]*\n    ```\n\n    Examples:\n\n    ```\n    @joe.watt\n    ```\n    ```\n    @_alice83\n    ```\n    ```\n    @The-Ronald\n    ```\n\n4. `Tag` term is defined by the leading `#` character, followed by at least one alphanumeric or\n    underscore character, followed by an arbitrary sequence of alphanumeric characters, hyphens,\n    underscores, and dots.\n\n    Regular expression:\n\n    ```\n    \\#[a-zA-Z0-9_][a-zA-Z0-9_\\-.]*\n    ```\n\n    Examples:\n\n    ```\n    #php\n    ```\n    ```\n    #PHP-7.1\n    ```\n    ```\n    #query_parser\n    ```\n\n## Operators\n\nTerms can be combined or modified using binary and unary operators:\n\n1. `Logical and` is a binary operator that combines left and right operands so that both must\n    match.\n\n    It comes in two forms: `AND`, `&&`\n\n    In both cases, it must be separated from its operands by whitespace.\n\n    ```\n    coffee AND milk\n    ```\n    ```\n    tea && lemon\n    ```\n\n2. `Logical or` is a binary operator that combines left and right operands so that at least one of\n    them has to match.\n\n    It comes in two forms: `OR`, `||`\n\n    In both cases, it must be separated from its operands by whitespace.\n\n    ```\n    potato OR tomato\n    ```\n    ```\n    true || false\n    ```\n\n3. 
`Logical not` is a unary operator that modifies its operand so that it must not match.\n\n    It comes in two forms: `NOT`, `!`\n\n    When `NOT` form is used, it must be separated from its operand by whitespace:\n\n    ```\n    NOT important\n    ```\n\n    When shorthand form `!` is used, it must be adjacent to its operand:\n\n    ```\n    !important\n    ```\n\n4. `Mandatory` is a unary operator that modifies its operand so that it must match.\n    It's represented by a plus sign `+` and must be placed adjacent to its operand.\n\n    ```\n    +coffee\n    ```\n\n5. `Prohibited` is a unary operator that modifies its operand so that it must not match.\n    It's represented by a minus sign `-` and must be placed adjacent to its operand.\n\n    ```\n    -cake\n    ```\n\n### Operator precedence\n\nUnary operators are applied first. Since they apply to the first element to the left, they never\nconflict. They are followed by binary operators, with `Logical and` preceding `Logical or`:\n\n1. `Logical not`, `Mandatory`, `Prohibited`\n2. `Logical and`\n3. `Logical or`\n\n## Grouping\n\nTerms and expressions can be grouped using round brackets. A group is processed as a whole. The\nfollowing two examples will be processed as the same since grouping follows operator precedence:\n\n```\none OR NOT two AND three\n```\n```\none OR ((NOT two) AND three)\n```\n\nBut you can also use grouping to change the meaning that would follow from operator precedence:\n\n```\n(one OR NOT two) AND three\n```\n```\none OR NOT (two AND three)\n```\n\n## Domains\n\nDomain is an abstract category on which the term or group applies. It's defined by prefixing the\nterm or group with a domain string, followed by a colon `:`. Domain string must start with at least\none alphanumeric or underscore character and is followed by an arbitrary sequence of alphanumeric\ncharacters, hyphens `-`, underscores `_` and dots `.`.\n\nNote that the domain cannot be used on `Tag` and `User` terms. 
These two, in fact, define implicit\ndomains of their own.\n\nRegular expression for domain string:\n\n```\n[a-zA-Z_][a-zA-Z0-9_\\-.]*\n```\n\nExamples:\n\n```\ntype:aeroplane\n```\n```\ntitle:\"Language processor\"\n```\n```\ndescription:(wings AND propeller)\n```\n\n## Special characters\n\nThe characters that are part of the language syntax must be escaped in order not to be recognized as\nsuch by the engine. These are:\n\n- `(` left paren\n- `)` right paren\n- `+` plus\n- `-` minus\n- `!` exclamation mark\n- `\"` double quote\n- `#` hash\n- `@` at sign\n- `:` colon\n- `\\` backslash\n- `␣` blank space\n\nCharacter used for escaping is backslash `\\`:\n\n```\njoined\\ word\n```\n```\n\"escaped \\\"double quote\\\"\"\n```\n```\nescaped \\+operator domain\\:word \\@user \\#tag \\(and so on\\)\n```\n```\ndouble backslash \\\\ is a backslash escaped\n```\n\nAside from the quotation marks themselves, escaping is not required inside phrases. Since quotes are\nused as delimiters, everything between them is taken as-is. Hence these will be interpreted as equal\nin meaning:\n\n```\n\"+one -two\"\n```\n```\n\"\\+one \\-two\"\n```\n\nIn some cases the tokenizer will automatically assume that a special character is to be interpreted\nas if it was escaped. The following pairs will be processed as the same:\n\n1. Colon at the end of a `Word` is considered part of the `Word`\n\n   ```\n   word:\n   ```\n   ```\n   word\\:\n   ```\n\n2. Colon placed after a domain colon is considered part of the `Word`\n\n   ```\n   domain:domain:domain\n   ```\n   ```\n   domain:domain\\:domain\n   ```\n\n3. Domain can't be used on a `Tag` and `User` terms\n\n   ```\n   domain:#tag domain:@user\n   ```\n   ```\n   domain:\\#tag domain:\\@user\n   ```\n\n4. 
Characters used for `Mandatory`, `Prohibited` and shorthand `Logical not` operators can be\n   considered part of the `Word`:\n\n   - When placed after domain colon\n\n      ```\n      domain:+word domain:-word domain:!word\n      ```\n      ```\n      domain:\\+word domain:\\-word domain:\\!word\n      ```\n\n   - When placed in the middle of the word\n\n      ```\n      one+two one-two one!two\n      ```\n      ```\n      one\\+two one\\-two one\\!two\n      ```\n\n   - When placed at the end of the `Word`\n\n      ```\n      one+ two- three!\n      ```\n      ```\n      one\\+ two\\- three\\!\n      ```\n"
  },
  {
    "path": "lib/Languages/Galach/TokenExtractor/Full.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\TokenExtractor;\n\nuse QueryTranslator\\Languages\\Galach\\TokenExtractor;\nuse QueryTranslator\\Languages\\Galach\\Tokenizer;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Phrase;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Tag;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\User;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Word;\nuse RuntimeException;\n\n/**\n * Full implementation of the Galach token extractor.\n *\n * Supports all features of the language.\n */\nfinal class Full extends TokenExtractor\n{\n    /**\n     * Map of regex expressions to Token types.\n     *\n     * @var array\n     */\n    private static $expressionTypeMap = [\n        '/(?<lexeme>[\\s]+)/Au' => Tokenizer::TOKEN_WHITESPACE,\n        '/(?<lexeme>\\+)/Au' => Tokenizer::TOKEN_MANDATORY,\n        '/(?<lexeme>-)/Au' => Tokenizer::TOKEN_PROHIBITED,\n        '/(?<lexeme>!)/Au' => Tokenizer::TOKEN_LOGICAL_NOT_2,\n        '/(?<lexeme>\\))/Au' => Tokenizer::TOKEN_GROUP_END,\n        '/(?<lexeme>NOT)(?:[\\s\"()+\\-!]|$)/Au' => Tokenizer::TOKEN_LOGICAL_NOT,\n        '/(?<lexeme>(?:AND|&&))(?:[\\s\"()+\\-!]|$)/Au' => Tokenizer::TOKEN_LOGICAL_AND,\n        '/(?<lexeme>(?:OR|\\|\\|))(?:[\\s\"()+\\-!]|$)/Au' => Tokenizer::TOKEN_LOGICAL_OR,\n        '/(?<lexeme>(?:(?<domain>[a-zA-Z_][a-zA-Z0-9_\\-.]*):)?(?<delimiter>\\())/Au' => Tokenizer::TOKEN_GROUP_BEGIN,\n        '/(?<lexeme>(?:(?<marker>(?<!\\\\\\\\)\\#)(?<tag>[a-zA-Z0-9_][a-zA-Z0-9_\\-.]*)))(?:[\\s\"()+!]|$)/Au' => Tokenizer::TOKEN_TERM,\n        '/(?<lexeme>(?:(?<marker>(?<!\\\\\\\\)@)(?<user>[a-zA-Z0-9_][a-zA-Z0-9_\\-.]*)))(?:[\\s\"()+!]|$)/Au' => Tokenizer::TOKEN_TERM,\n        '/(?<lexeme>(?:(?<domain>[a-zA-Z_][a-zA-Z0-9_\\-.]*):)?(?<quote>(?<!\\\\\\\\)[\"])(?<phrase>.*?)(?:(?<!\\\\\\\\)(?P=quote)))/Aus' => Tokenizer::TOKEN_TERM,\n        
'/(?<lexeme>(?:(?<domain>[a-zA-Z_][a-zA-Z0-9_\\-.]*):)?(?<word>(?:\\\\\\\\\\\\\\\\|\\\\\\\\ |\\\\\\\\\\(|\\\\\\\\\\)|\\\\\\\\\"|[^\"()\\s])+?))(?:(?<!\\\\\\\\)[\"]|\\(|\\)|$|\\s)/Au' => Tokenizer::TOKEN_TERM,\n    ];\n\n    protected function getExpressionTypeMap()\n    {\n        return self::$expressionTypeMap;\n    }\n\n    protected function createTermToken($position, array $data)\n    {\n        $lexeme = $data['lexeme'];\n\n        switch (true) {\n            case isset($data['word']):\n                return new Word(\n                    $lexeme,\n                    $position,\n                    $data['domain'],\n                    // un-backslash special characters\n                    preg_replace('/(?:\\\\\\\\(\\\\\\\\|([\"+\\-!():#@ ])))/', '$1', $data['word'])\n                );\n            case isset($data['phrase']):\n                $quote = $data['quote'];\n\n                return new Phrase(\n                    $lexeme,\n                    $position,\n                    $data['domain'],\n                    $quote,\n                    // un-backslash quote\n                    preg_replace('/(?:\\\\\\\\([' . $quote . ']))/', '$1', $data['phrase'])\n                );\n            case isset($data['tag']):\n                return new Tag(\n                    $lexeme,\n                    $position,\n                    $data['marker'],\n                    $data['tag']\n                );\n            case isset($data['user']):\n                return new User(\n                    $lexeme,\n                    $position,\n                    $data['marker'],\n                    $data['user']\n                );\n        }\n\n        throw new RuntimeException('Could not extract term token from the given data');\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/TokenExtractor/Text.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\TokenExtractor;\n\nuse QueryTranslator\\Languages\\Galach\\TokenExtractor;\nuse QueryTranslator\\Languages\\Galach\\Tokenizer;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\GroupBegin;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Phrase;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Word;\nuse RuntimeException;\n\n/**\n * Text implementation of the Galach token extractor.\n *\n * Supports text related subset of the language features.\n */\nfinal class Text extends TokenExtractor\n{\n    /**\n     * Map of regex expressions to Token types.\n     *\n     * @var array\n     */\n    private static $expressionTypeMap = [\n        '/(?<lexeme>[\\s]+)/Au' => Tokenizer::TOKEN_WHITESPACE,\n        '/(?<lexeme>\\+)/Au' => Tokenizer::TOKEN_MANDATORY,\n        '/(?<lexeme>-)/Au' => Tokenizer::TOKEN_PROHIBITED,\n        '/(?<lexeme>!)/Au' => Tokenizer::TOKEN_LOGICAL_NOT_2,\n        '/(?<lexeme>\\))/Au' => Tokenizer::TOKEN_GROUP_END,\n        '/(?<lexeme>NOT)(?:[\\s\"()+\\-!]|$)/Au' => Tokenizer::TOKEN_LOGICAL_NOT,\n        '/(?<lexeme>(?:AND|&&))(?:[\\s\"()+\\-!]|$)/Au' => Tokenizer::TOKEN_LOGICAL_AND,\n        '/(?<lexeme>(?:OR|\\|\\|))(?:[\\s\"()+\\-!]|$)/Au' => Tokenizer::TOKEN_LOGICAL_OR,\n        '/(?<lexeme>\\()/Au' => Tokenizer::TOKEN_GROUP_BEGIN,\n        '/(?<lexeme>(?<quote>(?<!\\\\\\\\)[\"])(?<phrase>.*?)(?:(?<!\\\\\\\\)(?P=quote)))/Aus' => Tokenizer::TOKEN_TERM,\n        '/(?<lexeme>(?<word>(?:\\\\\\\\\\\\\\\\|\\\\\\\\ |\\\\\\\\\\(|\\\\\\\\\\)|\\\\\\\\\"|[^\"()\\s])+?))(?:(?<!\\\\\\\\)[\"]|\\(|\\)|$|\\s)/Au' => Tokenizer::TOKEN_TERM,\n    ];\n\n    protected function getExpressionTypeMap()\n    {\n        return self::$expressionTypeMap;\n    }\n\n    protected function createTermToken($position, array $data)\n    {\n        $lexeme = $data['lexeme'];\n\n        switch (true) {\n            case isset($data['word']):\n                return new Word(\n               
     $lexeme,\n                    $position,\n                    '',\n                    // un-backslash special chars\n                    preg_replace('/(?:\\\\\\\\(\\\\\\\\|([\"+\\-!() ])))/', '$1', $data['word'])\n                );\n            case isset($data['phrase']):\n                $quote = $data['quote'];\n\n                return new Phrase(\n                    $lexeme,\n                    $position,\n                    '',\n                    $quote,\n                    // un-backslash quote\n                    preg_replace('/(?:\\\\\\\\([' . $quote . ']))/', '$1', $data['phrase'])\n                );\n        }\n\n        throw new RuntimeException('Could not extract term token from the given data');\n    }\n\n    protected function createGroupBeginToken($position, array $data)\n    {\n        return new GroupBegin($data['lexeme'], $position, $data['lexeme'], '');\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/TokenExtractor.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach;\n\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\GroupBegin;\nuse QueryTranslator\\Values\\Token;\nuse RuntimeException;\n\n/**\n * Token extractor is used by Tokenizer to extract tokens from the input string.\n *\n * This is the abstract implementation intended to be used as an extension point.\n */\nabstract class TokenExtractor\n{\n    /**\n     * Return the token at the given $position of the $string.\n     *\n     * @throws \\RuntimeException On PCRE regex error\n     *\n     * @param string $string Input string\n     * @param int $position Position in the input string to extract from\n     *\n     * @return \\QueryTranslator\\Values\\Token Extracted token\n     */\n    final public function extract($string, $position)\n    {\n        $byteOffset = $this->getByteOffset($string, $position);\n\n        foreach ($this->getExpressionTypeMap() as $expression => $type) {\n            $success = preg_match($expression, $string, $matches, 0, $byteOffset);\n\n            if (false === $success) {\n                throw new RuntimeException('PCRE regex error code: ' . preg_last_error());\n            }\n\n            if (0 === $success) {\n                continue;\n            }\n\n            return $this->createToken($type, $position, $matches);\n        }\n\n        return new Token(\n            Tokenizer::TOKEN_BAILOUT,\n            mb_substr($string, $position, 1),\n            $position\n        );\n    }\n\n    /**\n     * Return a map of regular expressions to token types.\n     *\n     * The returned map must be an array where key is a regular expression\n     * and value is a corresponding token type. 
Regular expression must define\n     * named capturing group 'lexeme' that identifies part of the input string\n     * recognized as token.\n     *\n     * @return array\n     */\n    abstract protected function getExpressionTypeMap();\n\n    /**\n     * Create a term type token by the given parameters.\n     *\n     * @throw \\RuntimeException If token could not be created from the given $matches data\n     *\n     * @param int $position Position of the token in the input string\n     * @param array $data Regex match data, depends on the matched term token\n     *\n     * @return \\QueryTranslator\\Values\\Token\n     */\n    abstract protected function createTermToken($position, array $data);\n\n    /**\n     * Create a token object from the given parameters.\n     *\n     * @param int $type Token type\n     * @param int $position Position of the token in the input string\n     * @param array $data Regex match data, depends on the type of the token\n     *\n     * @return \\QueryTranslator\\Values\\Token\n     */\n    private function createToken($type, $position, array $data)\n    {\n        if ($type === Tokenizer::TOKEN_GROUP_BEGIN) {\n            return $this->createGroupBeginToken($position, $data);\n        }\n\n        if ($type === Tokenizer::TOKEN_TERM) {\n            return $this->createTermToken($position, $data);\n        }\n\n        return new Token($type, $data['lexeme'], $position);\n    }\n\n    /**\n     * Create an instance of Group token by the given parameters.\n     *\n     * @param $position\n     * @param array $data\n     *\n     * @return \\QueryTranslator\\Values\\Token\n     */\n    protected function createGroupBeginToken($position, array $data)\n    {\n        return new GroupBegin($data['lexeme'], $position, $data['delimiter'], $data['domain']);\n    }\n\n    /**\n     * Return the offset of the given $position in the input $string, in bytes.\n     *\n     * Offset in bytes is needed for preg_match $offset parameter.\n     *\n     * 
@param string $string\n     * @param int $position\n     *\n     * @return int\n     */\n    private function getByteOffset($string, $position)\n    {\n        return strlen(mb_substr($string, 0, $position));\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Tokenizer.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach;\n\nuse QueryTranslator\\Tokenizing;\nuse QueryTranslator\\Values\\TokenSequence;\n\n/**\n * Galach implementation of the Tokenizing interface.\n */\nfinal class Tokenizer implements Tokenizing\n{\n    /**\n     * Represents the whitespace in the input string.\n     */\n    const TOKEN_WHITESPACE = 1;\n\n    /**\n     * Combines two adjoining elements with logical AND.\n     */\n    const TOKEN_LOGICAL_AND = 2;\n\n    /**\n     * Combines two adjoining elements with logical OR.\n     */\n    const TOKEN_LOGICAL_OR = 4;\n\n    /**\n     * Applies logical NOT to the next (right-side) element.\n     */\n    const TOKEN_LOGICAL_NOT = 8;\n\n    /**\n     * Applies logical NOT to the next (right-side) element.\n     *\n     * This is an alternative to the TOKEN_LOGICAL_NOT, with the difference that\n     * parser will expect it's placed next (left) to the element it applies to,\n     * without the whitespace in between.\n     */\n    const TOKEN_LOGICAL_NOT_2 = 16;\n\n    /**\n     * Mandatory operator applies to the next (right-side) element and means\n     * that the element must be present. There must be no whitespace between it\n     * and the element it applies to.\n     */\n    const TOKEN_MANDATORY = 32;\n\n    /**\n     * Prohibited operator applies to the next (right-side) element and means\n     * that the element must not be present. 
There must be no whitespace between\n     * it and the element it applies to.\n     */\n    const TOKEN_PROHIBITED = 64;\n\n    /**\n     * Left side delimiter of a group.\n     *\n     * Group is used to group elements in order to form a sub-query.\n     *\n     * @see \\QueryTranslator\\Languages\\Galach\\Values\\Token\\GroupBegin\n     */\n    const TOKEN_GROUP_BEGIN = 128;\n\n    /**\n     * Right side delimiter of a group.\n     *\n     * Group is used to group elements in order to form a sub-query.\n     */\n    const TOKEN_GROUP_END = 256;\n\n    /**\n     * Term token type represents a category of term type tokens.\n     *\n     * This type is intended to be used as an extension point through subtyping.\n     *\n     * @see \\QueryTranslator\\Languages\\Galach\\Values\\Token\\Phrase\n     * @see \\QueryTranslator\\Languages\\Galach\\Values\\Token\\Tag\n     * @see \\QueryTranslator\\Languages\\Galach\\Values\\Token\\User\n     * @see \\QueryTranslator\\Languages\\Galach\\Values\\Token\\Word\n     */\n    const TOKEN_TERM = 512;\n\n    /**\n     * Bailout token.\n     *\n     * If token could not be recognized, next character is extracted into a\n     * token of this type. 
Ignored by parser.\n     */\n    const TOKEN_BAILOUT = 1024;\n\n    /**\n     * @var \\QueryTranslator\\Languages\\Galach\\TokenExtractor\n     */\n    private $tokenExtractor;\n\n    /**\n     * @param \\QueryTranslator\\Languages\\Galach\\TokenExtractor $tokenExtractor\n     */\n    public function __construct(TokenExtractor $tokenExtractor)\n    {\n        $this->tokenExtractor = $tokenExtractor;\n    }\n\n    public function tokenize($string)\n    {\n        $length = mb_strlen($string);\n        $position = 0;\n        $tokens = [];\n\n        while ($position < $length) {\n            $token = $this->tokenExtractor->extract($string, $position);\n            $position += mb_strlen($token->lexeme);\n            $tokens[] = $token;\n        }\n\n        return new TokenSequence($tokens, $string);\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Values/Node/Group.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Values\\Node;\n\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\GroupBegin;\nuse QueryTranslator\\Values\\Node;\nuse QueryTranslator\\Values\\Token;\n\n/**\n * Group Node Visitor implementation.\n */\nfinal class Group extends Node\n{\n    /**\n     * @var \\QueryTranslator\\Values\\Node[]\n     */\n    public $nodes;\n\n    /**\n     * @var \\QueryTranslator\\Languages\\Galach\\Values\\Token\\GroupBegin\n     */\n    public $tokenLeft;\n\n    /**\n     * @var \\QueryTranslator\\Values\\Token\n     */\n    public $tokenRight;\n\n    /**\n     * @param \\QueryTranslator\\Values\\Node[] $nodes\n     * @param \\QueryTranslator\\Languages\\Galach\\Values\\Token\\GroupBegin $tokenLeft\n     * @param \\QueryTranslator\\Values\\Token $tokenRight\n     */\n    public function __construct(\n        array $nodes = [],\n        GroupBegin $tokenLeft = null,\n        Token $tokenRight = null\n    ) {\n        $this->nodes = $nodes;\n        $this->tokenLeft = $tokenLeft;\n        $this->tokenRight = $tokenRight;\n    }\n\n    public function getNodes()\n    {\n        return $this->nodes;\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Values/Node/LogicalAnd.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Values\\Node;\n\nuse QueryTranslator\\Values\\Node;\nuse QueryTranslator\\Values\\Token;\n\nfinal class LogicalAnd extends Node\n{\n    /**\n     * @var \\QueryTranslator\\Values\\Node\n     */\n    public $leftOperand;\n\n    /**\n     * @var \\QueryTranslator\\Values\\Node\n     */\n    public $rightOperand;\n\n    /**\n     * @var \\QueryTranslator\\Values\\Token\n     */\n    public $token;\n\n    /**\n     * @param \\QueryTranslator\\Values\\Node $leftOperand\n     * @param \\QueryTranslator\\Values\\Node $rightOperand\n     * @param \\QueryTranslator\\Values\\Token $token\n     */\n    public function __construct(\n        Node $leftOperand = null,\n        Node $rightOperand = null,\n        Token $token = null\n    ) {\n        $this->leftOperand = $leftOperand;\n        $this->rightOperand = $rightOperand;\n        $this->token = $token;\n    }\n\n    public function getNodes()\n    {\n        return [\n            $this->leftOperand,\n            $this->rightOperand,\n        ];\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Values/Node/LogicalNot.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Values\\Node;\n\nuse QueryTranslator\\Values\\Node;\nuse QueryTranslator\\Values\\Token;\n\nfinal class LogicalNot extends Node\n{\n    /**\n     * @var \\QueryTranslator\\Values\\Node\n     */\n    public $operand;\n\n    /**\n     * @var \\QueryTranslator\\Values\\Token\n     */\n    public $token;\n\n    /**\n     * @param \\QueryTranslator\\Values\\Node $operand\n     * @param \\QueryTranslator\\Values\\Token $token\n     */\n    public function __construct(Node $operand = null, Token $token = null)\n    {\n        $this->operand = $operand;\n        $this->token = $token;\n    }\n\n    public function getNodes()\n    {\n        return [$this->operand];\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Values/Node/LogicalOr.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Values\\Node;\n\nuse QueryTranslator\\Values\\Node;\nuse QueryTranslator\\Values\\Token;\n\nfinal class LogicalOr extends Node\n{\n    /**\n     * @var \\QueryTranslator\\Values\\Node\n     */\n    public $leftOperand;\n\n    /**\n     * @var \\QueryTranslator\\Values\\Node\n     */\n    public $rightOperand;\n\n    /**\n     * @var \\QueryTranslator\\Values\\Token\n     */\n    public $token;\n\n    /**\n     * @param \\QueryTranslator\\Values\\Node $leftOperand\n     * @param \\QueryTranslator\\Values\\Node $rightOperand\n     * @param \\QueryTranslator\\Values\\Token $token\n     */\n    public function __construct(\n        Node $leftOperand = null,\n        Node $rightOperand = null,\n        Token $token = null\n    ) {\n        $this->leftOperand = $leftOperand;\n        $this->rightOperand = $rightOperand;\n        $this->token = $token;\n    }\n\n    public function getNodes()\n    {\n        return [\n            $this->leftOperand,\n            $this->rightOperand,\n        ];\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Values/Node/Mandatory.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Values\\Node;\n\nuse QueryTranslator\\Values\\Node;\nuse QueryTranslator\\Values\\Token;\n\nfinal class Mandatory extends Node\n{\n    /**\n     * @var \\QueryTranslator\\Values\\Node\n     */\n    public $operand;\n\n    /**\n     * @var \\QueryTranslator\\Values\\Token\n     */\n    public $token;\n\n    /**\n     * @param \\QueryTranslator\\Values\\Node $operand\n     * @param \\QueryTranslator\\Values\\Token $token\n     */\n    public function __construct(Node $operand = null, Token $token = null)\n    {\n        $this->operand = $operand;\n        $this->token = $token;\n    }\n\n    public function getNodes()\n    {\n        return [$this->operand];\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Values/Node/Prohibited.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Values\\Node;\n\nuse QueryTranslator\\Values\\Node;\nuse QueryTranslator\\Values\\Token;\n\nfinal class Prohibited extends Node\n{\n    /**\n     * @var \\QueryTranslator\\Values\\Node\n     */\n    public $operand;\n\n    /**\n     * @var \\QueryTranslator\\Values\\Token\n     */\n    public $token;\n\n    /**\n     * @param \\QueryTranslator\\Values\\Node $operand\n     * @param \\QueryTranslator\\Values\\Token $token\n     */\n    public function __construct(Node $operand = null, Token $token = null)\n    {\n        $this->operand = $operand;\n        $this->token = $token;\n    }\n\n    public function getNodes()\n    {\n        return [$this->operand];\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Values/Node/Query.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Values\\Node;\n\nuse QueryTranslator\\Values\\Node;\n\nfinal class Query extends Node\n{\n    /**\n     * @var \\QueryTranslator\\Values\\Node[]\n     */\n    public $nodes;\n\n    /**\n     * @param \\QueryTranslator\\Values\\Node[] $nodes\n     */\n    public function __construct(array $nodes)\n    {\n        $this->nodes = $nodes;\n    }\n\n    public function getNodes()\n    {\n        return $this->nodes;\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Values/Node/Term.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Values\\Node;\n\nuse QueryTranslator\\Values\\Node;\nuse QueryTranslator\\Values\\Token;\n\nfinal class Term extends Node\n{\n    /**\n     * @var \\QueryTranslator\\Values\\Token\n     */\n    public $token;\n\n    /**\n     * @param \\QueryTranslator\\Values\\Token $token\n     */\n    public function __construct(Token $token)\n    {\n        $this->token = $token;\n    }\n\n    public function getNodes()\n    {\n        return [];\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Values/Token/GroupBegin.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Values\\Token;\n\nuse QueryTranslator\\Languages\\Galach\\Tokenizer;\nuse QueryTranslator\\Values\\Token;\n\n/**\n * GroupBegin token represents group's domain and left side delimiter.\n */\nfinal class GroupBegin extends Token\n{\n    /**\n     * Holds group's left side delimiter string.\n     *\n     * @var string\n     */\n    public $delimiter;\n\n    /**\n     * Holds domain string.\n     *\n     * @var string\n     */\n    public $domain;\n\n    /**\n     * @param string $lexeme\n     * @param int $position\n     * @param string $delimiter\n     * @param string $domain\n     */\n    public function __construct($lexeme, $position, $delimiter, $domain)\n    {\n        $this->delimiter = $delimiter;\n        $this->domain = $domain;\n\n        parent::__construct(Tokenizer::TOKEN_GROUP_BEGIN, $lexeme, $position);\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Values/Token/Phrase.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Values\\Token;\n\nuse QueryTranslator\\Languages\\Galach\\Tokenizer;\nuse QueryTranslator\\Values\\Token;\n\n/**\n * Phrase term token.\n *\n * @see \\QueryTranslator\\Languages\\Galach\\Tokenizer::TOKEN_TERM\n */\nfinal class Phrase extends Token\n{\n    /**\n     * Holds domain identifier or null if not set.\n     *\n     * @var null|string\n     */\n    public $domain;\n\n    /**\n     * @var string\n     */\n    public $quote;\n\n    /**\n     * @var string\n     */\n    public $phrase;\n\n    /**\n     * @param string $lexeme\n     * @param int $position\n     * @param string $domain\n     * @param string $quote\n     * @param string $phrase\n     */\n    public function __construct($lexeme, $position, $domain, $quote, $phrase)\n    {\n        $this->domain = $domain;\n        $this->quote = $quote;\n        $this->phrase = $phrase;\n\n        parent::__construct(Tokenizer::TOKEN_TERM, $lexeme, $position);\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Values/Token/Tag.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Values\\Token;\n\nuse QueryTranslator\\Languages\\Galach\\Tokenizer;\nuse QueryTranslator\\Values\\Token;\n\n/**\n * Tag term token.\n *\n * @see \\QueryTranslator\\Languages\\Galach\\Tokenizer::TOKEN_TERM\n */\nfinal class Tag extends Token\n{\n    /**\n     * @var string\n     */\n    public $marker;\n\n    /**\n     * @var string\n     */\n    public $tag;\n\n    /**\n     * @param string $lexeme\n     * @param int $position\n     * @param string $marker\n     * @param string $tag\n     */\n    public function __construct($lexeme, $position, $marker, $tag)\n    {\n        $this->marker = $marker;\n        $this->tag = $tag;\n\n        parent::__construct(Tokenizer::TOKEN_TERM, $lexeme, $position);\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Values/Token/User.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Values\\Token;\n\nuse QueryTranslator\\Languages\\Galach\\Tokenizer;\nuse QueryTranslator\\Values\\Token;\n\n/**\n * User term token.\n *\n * @see \\QueryTranslator\\Languages\\Galach\\Tokenizer::TOKEN_TERM\n */\nfinal class User extends Token\n{\n    /**\n     * @var string\n     */\n    public $marker;\n\n    /**\n     * @var string\n     */\n    public $user;\n\n    /**\n     * @param string $lexeme\n     * @param int $position\n     * @param string $marker\n     * @param string $user\n     */\n    public function __construct($lexeme, $position, $marker, $user)\n    {\n        $this->marker = $marker;\n        $this->user = $user;\n\n        parent::__construct(Tokenizer::TOKEN_TERM, $lexeme, $position);\n    }\n}\n"
  },
  {
    "path": "lib/Languages/Galach/Values/Token/Word.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Languages\\Galach\\Values\\Token;\n\nuse QueryTranslator\\Languages\\Galach\\Tokenizer;\nuse QueryTranslator\\Values\\Token;\n\n/**\n * Word term token.\n *\n * @see \\QueryTranslator\\Languages\\Galach\\Tokenizer::TOKEN_TERM\n */\nfinal class Word extends Token\n{\n    /**\n     * Holds domain string.\n     *\n     * @var string\n     */\n    public $domain;\n\n    /**\n     * @var string\n     */\n    public $word;\n\n    /**\n     * @param string $lexeme\n     * @param int $position\n     * @param string $domain\n     * @param string $word\n     */\n    public function __construct($lexeme, $position, $domain, $word)\n    {\n        $this->domain = $domain;\n        $this->word = $word;\n\n        parent::__construct(Tokenizer::TOKEN_TERM, $lexeme, $position);\n    }\n}\n"
  },
  {
    "path": "lib/Parsing.php",
    "content": "<?php\n\nnamespace QueryTranslator;\n\nuse QueryTranslator\\Values\\TokenSequence;\n\n/**\n * Interface for parsing a sequence of tokens into a syntax tree.\n */\ninterface Parsing\n{\n    /**\n     * Parse the given $tokenSequence.\n     *\n     * @param \\QueryTranslator\\Values\\TokenSequence $tokenSequence\n     *\n     * @return \\QueryTranslator\\Values\\SyntaxTree\n     */\n    public function parse(TokenSequence $tokenSequence);\n}\n"
  },
  {
    "path": "lib/Tokenizing.php",
    "content": "<?php\n\nnamespace QueryTranslator;\n\n/**\n * Interface for tokenizing a string into a sequence of tokens.\n */\ninterface Tokenizing\n{\n    /**\n     * Tokenize the given $string.\n     *\n     * @param string $string Input string\n     *\n     * @return \\QueryTranslator\\Values\\TokenSequence\n     */\n    public function tokenize($string);\n}\n"
  },
  {
    "path": "lib/Values/Correction.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Values;\n\n/**\n * Represents a correction applied during parsing of the token sequence.\n *\n * @see \\QueryTranslator\\Parsing\n * @see \\QueryTranslator\\Values\\TokenSequence\n */\nclass Correction\n{\n    /**\n     * Correction type constant.\n     *\n     * Defined by the language implementation.\n     *\n     * @var mixed\n     */\n    public $type;\n\n    /**\n     * An array of tokens that correction affects.\n     *\n     * @var \\QueryTranslator\\Values\\Token[]\n     */\n    public $tokens = [];\n\n    /**\n     * @param mixed $type\n     * @param \\QueryTranslator\\Values\\Token[] ...$tokens\n     */\n    public function __construct($type, Token ...$tokens)\n    {\n        $this->type = $type;\n        $this->tokens = $tokens;\n    }\n}\n"
  },
  {
    "path": "lib/Values/Node.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Values;\n\n/**\n * Node is a basic building element of the syntax tree.\n *\n * @see \\QueryTranslator\\Values\\SyntaxTree\n */\nabstract class Node\n{\n    /**\n     * Return an array of sub-nodes.\n     *\n     * @return \\QueryTranslator\\Values\\Node[]\n     */\n    abstract public function getNodes();\n}\n"
  },
  {
    "path": "lib/Values/SyntaxTree.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Values;\n\n/**\n * Syntax tree is an abstract hierarchical representation of the query syntax,\n * intended for easy conversion into different concrete formats.\n *\n * @see \\QueryTranslator\\Parsing::parse()\n */\nclass SyntaxTree\n{\n    /**\n     * The root node of the syntax tree.\n     *\n     * @var \\QueryTranslator\\Values\\Node\n     */\n    public $rootNode;\n\n    /**\n     * Token sequence that was parsed into this syntax tree.\n     *\n     * @var \\QueryTranslator\\Values\\TokenSequence\n     */\n    public $tokenSequence;\n\n    /**\n     * An array of corrections performed while parsing the token sequence.\n     *\n     * @var \\QueryTranslator\\Values\\Correction[]\n     */\n    public $corrections;\n\n    /**\n     * @param \\QueryTranslator\\Values\\Node $rootNode\n     * @param \\QueryTranslator\\Values\\TokenSequence $tokenSequence\n     * @param \\QueryTranslator\\Values\\Correction[] $corrections\n     */\n    public function __construct(Node $rootNode, TokenSequence $tokenSequence, array $corrections)\n    {\n        $this->rootNode = $rootNode;\n        $this->tokenSequence = $tokenSequence;\n        $this->corrections = $corrections;\n    }\n}\n"
  },
  {
    "path": "lib/Values/Token.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Values;\n\n/**\n * Token represents a sequence of characters which forms a syntactic unit.\n */\nclass Token\n{\n    /**\n     * Token type constant.\n     *\n     * Categorizes the token for the purpose of parsing.\n     * Defined by the language implementation.\n     *\n     * @var mixed\n     */\n    public $type;\n\n    /**\n     * Token lexeme is a part of the input string recognized as token.\n     *\n     * @var string\n     */\n    public $lexeme;\n\n    /**\n     * Position of the lexeme in the input string.\n     *\n     * @var int\n     */\n    public $position;\n\n    /**\n     * @param mixed $type\n     * @param string $lexeme\n     * @param int $position\n     */\n    public function __construct($type, $lexeme, $position)\n    {\n        $this->type = $type;\n        $this->lexeme = $lexeme;\n        $this->position = $position;\n    }\n}\n"
  },
  {
    "path": "lib/Values/TokenSequence.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Values;\n\n/**\n * Token sequence holds an array of tokens extracted from the query string.\n *\n * @see \\QueryTranslator\\Tokenizing::tokenize()\n * @see \\QueryTranslator\\Values\\Token\n */\nclass TokenSequence\n{\n    /**\n     * An array of tokens extracted from the input string.\n     *\n     * @var \\QueryTranslator\\Values\\Token[]\n     */\n    public $tokens;\n\n    /**\n     * Source query string, unmodified.\n     *\n     * @var string\n     */\n    public $source;\n\n    /**\n     * @param \\QueryTranslator\\Values\\Token[] $tokens\n     * @param string $source\n     */\n    public function __construct(array $tokens, $source)\n    {\n        $this->tokens = $tokens;\n        $this->source = $source;\n    }\n}\n"
  },
  {
    "path": "phpunit.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<phpunit bootstrap=\"./tests/bootstrap.php\"\n         colors=\"true\"\n         convertErrorsToExceptions=\"true\"\n         convertNoticesToExceptions=\"true\"\n         convertWarningsToExceptions=\"true\">\n    <php>\n        <env name=\"SYMFONY_DEPRECATIONS_HELPER\" value=\"disabled\"/>\n    </php>\n    <testsuites>\n        <testsuite name=\"Query parser tests\">\n            <directory suffix=\"Test.php\">./tests</directory>\n        </testsuite>\n    </testsuites>\n    <coverage processUncoveredFiles=\"true\">\n        <include>\n            <directory suffix=\".php\">./lib</directory>\n        </include>\n    </coverage>\n</phpunit>\n"
  },
  {
    "path": "tests/Galach/Generators/AggregateVisitorDispatchTest.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Tests\\Galach\\Generators;\n\nuse PHPUnit\\Framework\\TestCase;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Aggregate;\nuse QueryTranslator\\Values\\Node;\nuse RuntimeException;\n\n/**\n * Test case for Aggregate visitor.\n */\nclass AggregateVisitorDispatchTest extends TestCase\n{\n    public function testAccept()\n    {\n        /** @var \\QueryTranslator\\Values\\Node $nodeMock */\n        $nodeMock = $this->getMockBuilder(Node::class)->getMock();\n\n        $this->assertTrue((new Aggregate())->accept($nodeMock));\n    }\n\n    public function testVisitThrowsException()\n    {\n        $this->expectException(RuntimeException::class);\n        $this->expectExceptionMessage('No visitor available for Mock');\n\n        /** @var \\QueryTranslator\\Values\\Node $nodeMock */\n        $nodeMock = $this->getMockBuilder(Node::class)->getMock();\n\n        (new Aggregate())->visit($nodeMock);\n    }\n}\n"
  },
  {
    "path": "tests/Galach/Generators/ExtendedDisMaxTest.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Tests\\Galach\\Generators;\n\nuse PHPUnit\\Framework\\TestCase;\nuse QueryTranslator\\Languages\\Galach\\Generators;\nuse QueryTranslator\\Languages\\Galach\\Parser;\nuse QueryTranslator\\Languages\\Galach\\TokenExtractor;\nuse QueryTranslator\\Languages\\Galach\\Tokenizer;\n\n/**\n * Test case for ExtendedDisMax generator.\n */\nclass ExtendedDisMaxTest extends TestCase\n{\n    const FIELD_USER = 'user_s';\n    const FIELD_TAG = 'tags_ms';\n    const FIELD_TEXT_DEFAULT = 'default_text_t';\n    const FIELD_TEXT_DOMAIN = 'domain';\n    const FIELD_TEXT_DOMAIN_MAPPED = 'special_text_t';\n\n    public function providerForTestTranslation()\n    {\n        return [\n            [\n                'one',\n                'one',\n            ],\n            [\n                \"'one'\",\n                \"'one'\",\n            ],\n            [\n                '\"one\"',\n                '\"one\"',\n            ],\n            [\n                'one two',\n                'one two',\n            ],\n            [\n                '(one two)',\n                '(one two)',\n            ],\n            [\n                'unexpected:(one two)',\n                'default_text_t:(one two)',\n            ],\n            [\n                'domain:(one two)',\n                'special_text_t:(one two)',\n            ],\n            [\n                'one AND two',\n                'one AND two',\n            ],\n            [\n                'one && two',\n                'one AND two',\n            ],\n            [\n                'one OR two',\n                'one OR two',\n            ],\n            [\n                'one || two',\n                'one OR two',\n            ],\n            [\n                'NOT one',\n                'NOT one',\n            ],\n            [\n                '!one',\n                'NOT one',\n            ],\n            [\n                '+one',\n                '+one',\n      
      ],\n            [\n                '-one',\n                '-one',\n            ],\n            [\n                '@user',\n                'user_s:user',\n            ],\n            [\n                '#tag',\n                'tags_ms:tag',\n            ],\n            [\n                'unexpected:one',\n                'default_text_t:one',\n            ],\n            [\n                'domain:one',\n                'special_text_t:one',\n            ],\n            [\n                \"unexpected:'one'\",\n                \"default_text_t:'one'\",\n            ],\n            [\n                \"domain:'one'\",\n                \"special_text_t:'one'\",\n            ],\n            [\n                'unexpected:\"one\"',\n                'default_text_t:\"one\"',\n            ],\n            [\n                'domain:\"one\"',\n                'special_text_t:\"one\"',\n            ],\n            [\n                '\\\\',\n                '\\\\\\\\',\n            ],\n            [\n                '\\\\+',\n                '\\\\+',\n            ],\n            [\n                '\\\\-',\n                '\\\\-',\n            ],\n            [\n                '\\\\&&',\n                '\\\\\\\\\\\\&&',\n            ],\n            [\n                '\\\\||',\n                '\\\\\\\\\\\\||',\n            ],\n            [\n                '\\\\!',\n                '\\\\!',\n            ],\n            [\n                '\\\\(',\n                '\\\\(',\n            ],\n            [\n                '\\\\)',\n                '\\\\)',\n            ],\n            [\n                '\\\\{',\n                '\\\\\\\\\\\\{',\n            ],\n            [\n                '\\\\}',\n                '\\\\\\\\\\\\}',\n            ],\n            [\n                '\\\\[',\n                '\\\\\\\\\\\\[',\n            ],\n            [\n                '\\\\]',\n                '\\\\\\\\\\\\]',\n            ],\n            [\n                
'\\\\^',\n                '\\\\\\\\\\\\^',\n            ],\n            [\n                '\\\\\"',\n                '\\\\\"',\n            ],\n            [\n                '\\\\~',\n                '\\\\\\\\\\\\~',\n            ],\n            [\n                '\\\\*',\n                '\\\\\\\\\\\\*',\n            ],\n            [\n                '\\\\?',\n                '\\\\\\\\\\\\?',\n            ],\n            [\n                '\\\\:',\n                '\\\\:',\n            ],\n            [\n                '\\\\/',\n                '\\\\\\\\\\\\/',\n            ],\n            [\n                '\\\\\\\\',\n                '\\\\\\\\',\n            ],\n            [\n                '\\\\ ',\n                '\\\\ ',\n            ],\n        ];\n    }\n\n    /**\n     * @dataProvider providerForTestTranslation\n     *\n     * @param string $string\n     * @param string $expectedTranslatedString\n     */\n    public function testTranslation($string, $expectedTranslatedString)\n    {\n        $tokenExtractor = new TokenExtractor\\Full();\n        $tokenizer = new Tokenizer($tokenExtractor);\n        $parser = new Parser();\n        $generator = $this->getGenerator();\n\n        $tokenSequence = $tokenizer->tokenize($string);\n        $syntaxTree = $parser->parse($tokenSequence);\n        $translatedString = $generator->generate($syntaxTree);\n\n        $this->assertEquals($expectedTranslatedString, $translatedString);\n    }\n\n    /**\n     * @return \\QueryTranslator\\Languages\\Galach\\Generators\\ExtendedDisMax\n     */\n    protected function getGenerator()\n    {\n        $visitors = [];\n\n        $visitors[] = new Generators\\Lucene\\Common\\Prohibited();\n        $visitors[] = new Generators\\Lucene\\Common\\Group(\n            [\n                self::FIELD_TEXT_DOMAIN => self::FIELD_TEXT_DOMAIN_MAPPED,\n            ],\n            self::FIELD_TEXT_DEFAULT\n        );\n        $visitors[] = new Generators\\Lucene\\Common\\Mandatory();\n  
      $visitors[] = new Generators\\Lucene\\Common\\LogicalAnd();\n        $visitors[] = new Generators\\Lucene\\Common\\LogicalNot();\n        $visitors[] = new Generators\\Lucene\\Common\\LogicalOr();\n        $visitors[] = new Generators\\Lucene\\Common\\Phrase(\n            [\n                self::FIELD_TEXT_DOMAIN => self::FIELD_TEXT_DOMAIN_MAPPED,\n            ],\n            self::FIELD_TEXT_DEFAULT\n        );\n        $visitors[] = new Generators\\Lucene\\Common\\Query();\n        $visitors[] = new Generators\\Lucene\\Common\\Tag(self::FIELD_TAG);\n        $visitors[] = new Generators\\Lucene\\Common\\User(self::FIELD_USER);\n        $visitors[] = new Generators\\Lucene\\ExtendedDisMax\\Word(\n            [\n                self::FIELD_TEXT_DOMAIN => self::FIELD_TEXT_DOMAIN_MAPPED,\n            ],\n            self::FIELD_TEXT_DEFAULT\n        );\n\n        $aggregate = new Generators\\Common\\Aggregate($visitors);\n\n        return new Generators\\ExtendedDisMax($aggregate);\n    }\n}\n"
  },
  {
    "path": "tests/Galach/Generators/LuceneVisitorDispatchTest.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Tests\\Galach\\Generators;\n\nuse LogicException;\nuse PHPUnit\\Framework\\TestCase;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common\\Group;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common\\LogicalAnd;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common\\LogicalNot;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common\\LogicalOr;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common\\Mandatory;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common\\Phrase;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common\\Prohibited;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common\\Query;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common\\Tag;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\Common\\User;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\ExtendedDisMax\\Word as ExtendedDisMaxWord;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Lucene\\QueryString\\Word as QueryStringWord;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Group as GroupNode;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalAnd as LogicalAndNode;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalNot as LogicalNotNode;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalOr as LogicalOrNode;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Mandatory as MandatoryNode;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Prohibited as ProhibitedNode;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Query as QueryNode;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Term;\nuse QueryTranslator\\Values\\Node;\nuse QueryTranslator\\Values\\Token;\n\n/**\n * Test case for Lucene visitors.\n */\nclass LuceneVisitorDispatchTest extends TestCase\n{\n  
  public function providerForTestVisitThrowsLogicExceptionNode()\n    {\n        $nodeMock = $this->getMockBuilder(Node::class)->getMock();\n\n        return [\n            [\n                new Group(),\n                $nodeMock,\n                'Implementation accepts instance of Group Node',\n            ],\n            [\n                new LogicalAnd(),\n                $nodeMock,\n                'Implementation accepts instance of LogicalAnd Node',\n            ],\n            [\n                new LogicalNot(),\n                $nodeMock,\n                'Implementation accepts instance of LogicalNot Node',\n            ],\n            [\n                new LogicalOr(),\n                $nodeMock,\n                'Implementation accepts instance of LogicalOr Node',\n            ],\n            [\n                new Mandatory(),\n                $nodeMock,\n                'Implementation accepts instance of Mandatory Node',\n            ],\n            [\n                new Phrase(),\n                $nodeMock,\n                'Implementation accepts instance of Term Node',\n            ],\n            [\n                new Prohibited(),\n                $nodeMock,\n                'Implementation accepts instance of Prohibited Node',\n            ],\n            [\n                new Query(),\n                $nodeMock,\n                'Implementation accepts instance of Query Node',\n            ],\n            [\n                new Tag(),\n                $nodeMock,\n                'Implementation accepts instance of Term Node',\n            ],\n            [\n                new User(),\n                $nodeMock,\n                'Implementation accepts instance of Term Node',\n            ],\n            [\n                new ExtendedDisMaxWord(),\n                $nodeMock,\n                'Implementation accepts instance of Term Node',\n            ],\n            [\n                new QueryStringWord(),\n                
$nodeMock,\n                'Implementation accepts instance of Term Node',\n            ],\n        ];\n    }\n\n    /**\n     * @dataProvider providerForTestVisitThrowsLogicExceptionNode\n     *\n     * @param \\QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor $visitor\n     * @param \\QueryTranslator\\Values\\Node $node\n     * @param string $expectedExceptionMessage\n     */\n    public function testVisitThrowsLogicExceptionNode(Visitor $visitor, Node $node, $expectedExceptionMessage)\n    {\n        $this->expectException(LogicException::class);\n\n        try {\n            $visitor->visit($node);\n        } catch (LogicException $e) {\n            $this->assertSame($expectedExceptionMessage, $e->getMessage());\n            throw $e;\n        }\n    }\n\n    public function providerForTestVisitThrowsLogicExceptionToken()\n    {\n        /** @var \\QueryTranslator\\Values\\Token $tokenMock */\n        $tokenMock = $this->getMockBuilder(Token::class)->disableOriginalConstructor()->getMock();\n        $node = new Term($tokenMock);\n\n        return [\n            [\n                new Phrase(),\n                $node,\n                'Implementation accepts instance of Phrase Token',\n            ],\n            [\n                new Tag(),\n                $node,\n                'Implementation accepts instance of Tag Token',\n            ],\n            [\n                new User(),\n                $node,\n                'Implementation accepts instance of User Token',\n            ],\n            [\n                new ExtendedDisMaxWord(),\n                $node,\n                'Implementation accepts instance of Word Token',\n            ],\n            [\n                new QueryStringWord(),\n                $node,\n                'Implementation accepts instance of Word Token',\n            ],\n        ];\n    }\n\n    /**\n     * @dataProvider providerForTestVisitThrowsLogicExceptionToken\n     *\n     * @param 
\\QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor $visitor\n     * @param \\QueryTranslator\\Values\\Node $node\n     * @param string $expectedExceptionMessage\n     */\n    public function testVisitThrowsLogicExceptionToken(Visitor $visitor, Node $node, $expectedExceptionMessage)\n    {\n        $this->expectException(LogicException::class);\n\n        try {\n            $visitor->visit($node);\n        } catch (LogicException $e) {\n            $this->assertSame($expectedExceptionMessage, $e->getMessage());\n            throw $e;\n        }\n    }\n\n    public function providerForTestVisitThrowsLogicExceptionSubVisitor()\n    {\n        return [\n            [\n                new Group(),\n                new GroupNode(),\n            ],\n            [\n                new LogicalAnd(),\n                new LogicalAndNode(),\n            ],\n            [\n                new LogicalNot(),\n                new LogicalNotNode(),\n            ],\n            [\n                new LogicalOr(),\n                new LogicalOrNode(),\n            ],\n            [\n                new Mandatory(),\n                new MandatoryNode(),\n            ],\n            [\n                new Prohibited(),\n                new ProhibitedNode(),\n            ],\n            [\n                new Query(),\n                new QueryNode([]),\n            ],\n        ];\n    }\n\n    /**\n     * @dataProvider providerForTestVisitThrowsLogicExceptionSubVisitor\n     *\n     * @param \\QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor $visitor\n     * @param \\QueryTranslator\\Values\\Node $node\n     */\n    public function testVisitThrowsLogicExceptionSubVisitor(Visitor $visitor, Node $node)\n    {\n        $this->expectException(LogicException::class);\n        $this->expectExceptionMessage(\"Implementation requires sub-visitor\");\n\n        $visitor->visit($node);\n    }\n}\n"
  },
  {
    "path": "tests/Galach/Generators/NativeVisitorDispatchTest.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Tests\\Galach\\Generators;\n\nuse LogicException;\nuse PHPUnit\\Framework\\TestCase;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Native\\BinaryOperator;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Native\\Group;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Native\\Phrase;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Native\\Query;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Native\\Tag;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Native\\UnaryOperator;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Native\\User;\nuse QueryTranslator\\Languages\\Galach\\Generators\\Native\\Word;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Group as GroupNode;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalAnd as LogicalAndNode;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalNot as LogicalNotNode;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalOr as LogicalOrNode;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Mandatory as MandatoryNode;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Prohibited as ProhibitedNode;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Query as QueryNode;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Term;\nuse QueryTranslator\\Values\\Node;\nuse QueryTranslator\\Values\\Token;\n\n/**\n * Test case for Native visitors.\n */\nclass NativeVisitorDispatchTest extends TestCase\n{\n    public function providerForTestVisitThrowsLogicExceptionNode()\n    {\n        $nodeMock = $this->getMockBuilder(Node::class)->getMock();\n\n        return [\n            [\n                new BinaryOperator(),\n                $nodeMock,\n                'Implementation accepts instance of LogicalAnd or LogicalOr Node',\n            ],\n            [\n                new Group(),\n                $nodeMock,\n           
     'Implementation accepts instance of Group Node',\n            ],\n            [\n                new Phrase(),\n                $nodeMock,\n                'Implementation accepts instance of Term Node',\n            ],\n            [\n                new Query(),\n                $nodeMock,\n                'Implementation accepts instance of Query Node',\n            ],\n            [\n                new Tag(),\n                $nodeMock,\n                'Implementation accepts instance of Term Node',\n            ],\n            [\n                new UnaryOperator(),\n                $nodeMock,\n                'Implementation accepts instance of Mandatory, Prohibited or LogicalNot Node',\n            ],\n            [\n                new User(),\n                $nodeMock,\n                'Implementation accepts instance of Term Node',\n            ],\n            [\n                new Word(),\n                $nodeMock,\n                'Implementation accepts instance of Term Node',\n            ],\n        ];\n    }\n\n    /**\n     * @dataProvider providerForTestVisitThrowsLogicExceptionNode\n     *\n     * @param \\QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor $visitor\n     * @param \\QueryTranslator\\Values\\Node $node\n     * @param string $expectedExceptionMessage\n     */\n    public function testVisitThrowsLogicExceptionNode(Visitor $visitor, Node $node, $expectedExceptionMessage)\n    {\n        $this->expectException(LogicException::class);\n\n        try {\n            $visitor->visit($node);\n        } catch (LogicException $e) {\n            $this->assertSame($expectedExceptionMessage, $e->getMessage());\n            throw $e;\n        }\n    }\n\n    public function providerForTestVisitThrowsLogicExceptionToken()\n    {\n        /** @var \\QueryTranslator\\Values\\Token $tokenMock */\n        $tokenMock = $this->getMockBuilder(Token::class)->disableOriginalConstructor()->getMock();\n        $node = new 
Term($tokenMock);\n\n        return [\n            [\n                new Phrase(),\n                $node,\n                'Implementation accepts instance of Phrase Token',\n            ],\n            [\n                new Tag(),\n                $node,\n                'Implementation accepts instance of Tag Token',\n            ],\n            [\n                new User(),\n                $node,\n                'Implementation accepts instance of User Token',\n            ],\n            [\n                new Word(),\n                $node,\n                'Implementation accepts instance of Word Token',\n            ],\n        ];\n    }\n\n    /**\n     * @dataProvider providerForTestVisitThrowsLogicExceptionToken\n     *\n     * @param \\QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor $visitor\n     * @param \\QueryTranslator\\Values\\Node $node\n     * @param string $expectedExceptionMessage\n     */\n    public function testVisitThrowsLogicExceptionToken(Visitor $visitor, Node $node, $expectedExceptionMessage)\n    {\n        $this->expectException(LogicException::class);\n\n        try {\n            $visitor->visit($node);\n        } catch (LogicException $e) {\n            $this->assertSame($expectedExceptionMessage, $e->getMessage());\n            throw $e;\n        }\n    }\n\n    public function providerForTestVisitThrowsLogicExceptionSubVisitor()\n    {\n        return [\n            [\n                new BinaryOperator(),\n                new LogicalAndNode(),\n            ],\n            [\n                new BinaryOperator(),\n                new LogicalOrNode(),\n            ],\n            [\n                new Group(),\n                new GroupNode(),\n            ],\n            [\n                new Query(),\n                new QueryNode([]),\n            ],\n            [\n                new UnaryOperator(),\n                new LogicalNotNode(),\n            ],\n            [\n                new 
UnaryOperator(),\n                new MandatoryNode(),\n            ],\n            [\n                new UnaryOperator(),\n                new ProhibitedNode(),\n            ],\n        ];\n    }\n\n    /**\n     * @dataProvider providerForTestVisitThrowsLogicExceptionSubVisitor\n     *\n     * @param \\QueryTranslator\\Languages\\Galach\\Generators\\Common\\Visitor $visitor\n     * @param \\QueryTranslator\\Values\\Node $node\n     */\n    public function testVisitThrowsLogicExceptionSubVisitor(Visitor $visitor, Node $node)\n    {\n        $this->expectException(LogicException::class);\n        $this->expectExceptionMessage(\"Implementation requires sub-visitor\");\n\n        $visitor->visit($node);\n    }\n}\n"
  },
  {
    "path": "tests/Galach/Generators/QueryStringTest.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Tests\\Galach\\Generators;\n\nuse QueryTranslator\\Languages\\Galach\\Generators;\nuse QueryTranslator\\Languages\\Galach\\Generators\\QueryString;\n\n/**\n * Test case for QueryString generator.\n */\nclass QueryStringTest extends ExtendedDisMaxTest\n{\n    public function providerForTestTranslation()\n    {\n        return array_merge(\n            parent::providerForTestTranslation(),\n            [\n                [\n                    '\\\\=',\n                    '\\\\\\\\\\\\=',\n                ],\n                [\n                    '\\\\>',\n                    '\\\\\\\\\\\\>',\n                ],\n                [\n                    '\\\\<',\n                    '\\\\\\\\\\\\<',\n                ],\n            ]\n        );\n    }\n\n    /**\n     * @return \\QueryTranslator\\Languages\\Galach\\Generators\\QueryString\n     */\n    protected function getGenerator()\n    {\n        $visitors = [];\n\n        $visitors[] = new Generators\\Lucene\\Common\\Prohibited();\n        $visitors[] = new Generators\\Lucene\\Common\\Group(\n            [\n                self::FIELD_TEXT_DOMAIN => self::FIELD_TEXT_DOMAIN_MAPPED,\n            ],\n            self::FIELD_TEXT_DEFAULT\n        );\n        $visitors[] = new Generators\\Lucene\\Common\\Mandatory();\n        $visitors[] = new Generators\\Lucene\\Common\\LogicalAnd();\n        $visitors[] = new Generators\\Lucene\\Common\\LogicalNot();\n        $visitors[] = new Generators\\Lucene\\Common\\LogicalOr();\n        $visitors[] = new Generators\\Lucene\\Common\\Phrase(\n            [\n                self::FIELD_TEXT_DOMAIN => self::FIELD_TEXT_DOMAIN_MAPPED,\n            ],\n            self::FIELD_TEXT_DEFAULT\n        );\n        $visitors[] = new Generators\\Lucene\\Common\\Query();\n        $visitors[] = new Generators\\Lucene\\Common\\Tag(self::FIELD_TAG);\n        $visitors[] = new Generators\\Lucene\\Common\\User(self::FIELD_USER);\n        
$visitors[] = new Generators\\Lucene\\QueryString\\Word(\n            [\n                self::FIELD_TEXT_DOMAIN => self::FIELD_TEXT_DOMAIN_MAPPED,\n            ],\n            self::FIELD_TEXT_DEFAULT\n        );\n\n        $aggregate = new Generators\\Common\\Aggregate($visitors);\n\n        return new QueryString($aggregate);\n    }\n}\n"
  },
  {
    "path": "tests/Galach/IntegrationTest.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Tests\\Galach;\n\nuse PHPUnit\\Framework\\TestCase;\nuse QueryTranslator\\Languages\\Galach\\Generators;\nuse QueryTranslator\\Languages\\Galach\\Parser;\nuse QueryTranslator\\Languages\\Galach\\TokenExtractor;\nuse QueryTranslator\\Languages\\Galach\\Tokenizer;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Group;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalAnd;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalNot;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalOr;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Mandatory;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Prohibited;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Query;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Term;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\GroupBegin as GroupBeginToken;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Phrase as PhraseToken;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Tag as TagToken;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\User as UserToken;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Word as WordToken;\nuse QueryTranslator\\Values\\Correction;\nuse QueryTranslator\\Values\\SyntaxTree;\nuse QueryTranslator\\Values\\Token;\nuse QueryTranslator\\Values\\TokenSequence;\n\n/**\n * Tests integration of language components.\n *\n *  - tokenization of the query string into a sequence of tokens\n *  - parsing the token sequence into a syntax tree\n *  - generating the result by traversing the syntax tree\n */\nclass IntegrationTest extends TestCase\n{\n    public function providerForTestQuery()\n    {\n        return [\n            [\n                '',\n                [],\n                new Query([]),\n            ],\n            [\n                'one',\n                [\n                    $token = new WordToken('one', 0, '', 'one'),\n                ],\n          
      new Query(\n                    [\n                        new Term($token),\n                    ]\n                ),\n            ],\n            [\n                'one two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new WordToken('two', 4, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new Term($token2),\n                    ]\n                ),\n            ],\n            [\n                'one AND two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 4),\n                    $token3 = new WordToken('two', 8, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new LogicalAnd(\n                            new Term($token1),\n                            new Term($token3),\n                            $token2\n                        ),\n                    ]\n                ),\n            ],\n            [\n                'one OR two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 4),\n                    $token3 = new WordToken('two', 7, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new LogicalOr(\n                            new Term($token1),\n                            new Term($token3),\n                            $token2\n                        ),\n                    ]\n                ),\n            ],\n            [\n                'one OR two AND three',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 
4),\n                    $token3 = new WordToken('two', 7, '', 'two'),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 11),\n                    $token5 = new WordToken('three', 15, '', 'three'),\n                ],\n                new Query(\n                    [\n                        new LogicalOr(\n                            new Term($token1),\n                            new LogicalAnd(\n                                new Term($token3),\n                                new Term($token5),\n                                $token4\n                            ),\n                            $token2\n                        ),\n                    ]\n                ),\n            ],\n            [\n                'one AND two OR three',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 4),\n                    $token3 = new WordToken('two', 8, '', 'two'),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 12),\n                    $token5 = new WordToken('three', 15, '', 'three'),\n                ],\n                new Query(\n                    [\n                        new LogicalOr(\n                            new LogicalAnd(\n                                new Term($token1),\n                                new Term($token3),\n                                $token2\n                            ),\n                            new Term($token5),\n                            $token4\n                        ),\n                    ]\n                ),\n            ],\n            [\n                'NOT one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new WordToken('one', 4, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new 
LogicalNot(\n                            new Term($token2),\n                            $token1\n                        ),\n                    ]\n                ),\n            ],\n            [\n                'one NOT two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 4),\n                    $token3 = new WordToken('two', 8, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new LogicalNot(\n                            new Term($token3),\n                            $token2\n                        ),\n                    ]\n                ),\n            ],\n            [\n                'one AND NOT two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 8),\n                    $token4 = new WordToken('two', 12, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new LogicalAnd(\n                            new Term($token1),\n                            new LogicalNot(\n                                new Term($token4),\n                                $token3\n                            ),\n                            $token2\n                        ),\n                    ]\n                ),\n            ],\n            [\n                'one OR NOT two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 7),\n                    $token4 = new WordToken('two', 11, '', 'two'),\n                
],\n                new Query(\n                    [\n                        new LogicalOr(\n                            new Term($token1),\n                            new LogicalNot(\n                                new Term($token4),\n                                $token3\n                            ),\n                            $token2\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '!one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 0),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new LogicalNot(\n                            new Term($token2),\n                            $token1\n                        ),\n                    ]\n                ),\n            ],\n            [\n                'one !two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 4),\n                    $token3 = new WordToken('two', 5, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new LogicalNot(\n                            new Term($token3),\n                            $token2\n                        ),\n                    ]\n                ),\n            ],\n            [\n                'one AND !two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 8),\n                    $token4 = new WordToken('two', 9, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new 
LogicalAnd(\n                            new Term($token1),\n                            new LogicalNot(\n                                new Term($token4),\n                                $token3\n                            ),\n                            $token2\n                        ),\n                    ]\n                ),\n            ],\n            [\n                'one OR !two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 7),\n                    $token4 = new WordToken('two', 8, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new LogicalOr(\n                            new Term($token1),\n                            new LogicalNot(\n                                new Term($token4),\n                                $token3\n                            ),\n                            $token2\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '(one two)',\n                [\n                    $token1 = new GroupBeginToken('(', 0, '(', null),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new WordToken('two', 5, '', 'two'),\n                    $token4 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 8),\n                ],\n                new Query(\n                    [\n                        new Group(\n                            [\n                                new Term($token2),\n                                new Term($token3),\n                            ],\n                            $token1,\n                            $token4\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '(one AND 
two)',\n                [\n                    $token1 = new GroupBeginToken('(', 0, '(', null),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 5),\n                    $token4 = new WordToken('two', 9, '', 'two'),\n                    $token5 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 12),\n                ],\n                new Query(\n                    [\n                        new Group(\n                            [\n                                new LogicalAnd(\n                                    new Term($token2),\n                                    new Term($token4),\n                                    $token3\n                                ),\n                            ],\n                            $token1,\n                            $token5\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '(NOT one OR two)',\n                [\n                    $token1 = new GroupBeginToken('(', 0, '(', null),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 1),\n                    $token3 = new WordToken('one', 5, '', 'one'),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 9),\n                    $token5 = new WordToken('two', 12, '', 'two'),\n                    $token6 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 15),\n                ],\n                new Query(\n                    [\n                        new Group(\n                            [\n                                new LogicalOr(\n                                    new LogicalNot(\n                                        new Term($token3),\n                                        $token2\n                                    ),\n                                    new Term($token5),\n                                    $token4\n                          
      ),\n                            ],\n                            $token1,\n                            $token6\n                        ),\n                    ]\n                ),\n            ],\n            [\n                'one AND (two OR three)',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 4),\n                    $token3 = new GroupBeginToken('(', 8, '(', null),\n                    $token4 = new WordToken('two', 9, '', 'two'),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 13),\n                    $token6 = new WordToken('three', 16, '', 'three'),\n                    $token7 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 21),\n                ],\n                new Query(\n                    [\n                        new LogicalAnd(\n                            new Term($token1),\n                            new Group(\n                                [\n                                    new LogicalOr(\n                                        new Term($token4),\n                                        new Term($token6),\n                                        $token5\n                                    ),\n                                ],\n                                $token3,\n                                $token7\n                            ),\n                            $token2\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '((one) AND (two AND (three OR four five)))',\n                [\n                    $token1 = new GroupBeginToken('(', 0, '(', null),\n                    $token2 = new GroupBeginToken('(', 1, '(', null),\n                    $token3 = new WordToken('one', 2, '', 'one'),\n                    $token4 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 5),\n                    $token5 = new 
Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 7),\n                    $token6 = new GroupBeginToken('(', 11, '(', null),\n                    $token7 = new WordToken('two', 12, '', 'two'),\n                    $token8 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 16),\n                    $token9 = new GroupBeginToken('(', 20, '(', null),\n                    $token10 = new WordToken('three', 21, '', 'three'),\n                    $token11 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 27),\n                    $token12 = new WordToken('four', 30, '', 'four'),\n                    $token13 = new WordToken('five', 35, '', 'five'),\n                    $token14 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 39),\n                    $token15 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 40),\n                    $token16 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 41),\n                ],\n                new Query(\n                    [\n                        new Group(\n                            [\n                                new LogicalAnd(\n                                    new Group(\n                                        [\n                                            new Term($token3),\n                                        ],\n                                        $token2,\n                                        $token4\n                                    ),\n                                    new Group(\n                                        [\n                                            new LogicalAnd(\n                                                new Term($token7),\n                                                new Group(\n                                                    [\n                                                        new LogicalOr(\n                                                            new Term($token10),\n                                                            new Term($token12),\n                          
                                  $token11\n                                                        ),\n                                                        new Term($token13),\n                                                    ],\n                                                    $token9,\n                                                    $token14\n                                                ),\n                                                $token8\n                                            ),\n                                        ],\n                                        $token6,\n                                        $token15\n                                    ),\n                                    $token5\n                                ),\n                            ],\n                            $token1,\n                            $token16\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '((one) (two OR three))',\n                [\n                    $token1 = new GroupBeginToken('(', 0, '(', null),\n                    $token2 = new GroupBeginToken('(', 1, '(', null),\n                    $token3 = new WordToken('one', 2, '', 'one'),\n                    $token4 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 5),\n                    $token5 = new GroupBeginToken('(', 7, '(', null),\n                    $token6 = new WordToken('two', 8, '', 'two'),\n                    $token7 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 12),\n                    $token8 = new WordToken('three', 15, '', 'three'),\n                    $token9 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 20),\n                    $token10 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 21),\n                ],\n                new Query(\n                    [\n                        new Group(\n                            [\n                                new Group(\n                          
          [\n                                        new Term($token3),\n                                    ],\n                                    $token2,\n                                    $token4\n                                ),\n                                new Group(\n                                    [\n                                        new LogicalOr(\n                                            new Term($token6),\n                                            new Term($token8),\n                                            $token7\n                                        ),\n                                    ],\n                                    $token5,\n                                    $token9\n                                ),\n                            ],\n                            $token1,\n                            $token10\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '+one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Mandatory(\n                            new Term($token2),\n                            $token1\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '+one AND +two',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 9),\n                    $token5 = new WordToken('two', 10, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new 
LogicalAnd(\n                            new Mandatory(\n                                new Term($token2),\n                                $token1\n                            ),\n                            new Mandatory(\n                                new Term($token5),\n                                $token4\n                            ),\n                            $token3\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '+one OR +two',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 8),\n                    $token5 = new WordToken('two', 9, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new LogicalOr(\n                            new Mandatory(\n                                new Term($token2),\n                                $token1\n                            ),\n                            new Mandatory(\n                                new Term($token5),\n                                $token4\n                            ),\n                            $token3\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '+one OR +two AND +three',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 8),\n                    $token5 = new WordToken('two', 9, '', 'two'),\n                    $token6 = new 
Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 13),\n                    $token7 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 17),\n                    $token8 = new WordToken('three', 18, '', 'three'),\n                ],\n                new Query(\n                    [\n                        new LogicalOr(\n                            new Mandatory(\n                                new Term($token2),\n                                $token1\n                            ),\n                            new LogicalAnd(\n                                new Mandatory(\n                                    new Term($token5),\n                                    $token4\n                                ),\n                                new Mandatory(\n                                    new Term($token8),\n                                    $token7\n                                ),\n                                $token6\n                            ),\n                            $token3\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '+one AND +two OR +three',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 9),\n                    $token5 = new WordToken('two', 10, '', 'two'),\n                    $token6 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 14),\n                    $token7 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 17),\n                    $token8 = new WordToken('three', 18, '', 'three'),\n                ],\n                new Query(\n                    [\n                        new LogicalOr(\n                            new LogicalAnd(\n                                new Mandatory(\n                     
               new Term($token2),\n                                    $token1\n                                ),\n                                new Mandatory(\n                                    new Term($token5),\n                                    $token4\n                                ),\n                                $token3\n                            ),\n                            new Mandatory(\n                                new Term($token8),\n                                $token7\n                            ),\n                            $token6\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '+(one)',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new GroupBeginToken('(', 1, '(', null),\n                    $token3 = new WordToken('one', 2, '', 'one'),\n                    $token4 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 5),\n                ],\n                new Query(\n                    [\n                        new Mandatory(\n                            new Group(\n                                [\n                                    new Term($token3),\n                                ],\n                                $token2,\n                                $token4\n                            ),\n                            $token1\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '-one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Prohibited(\n                            new Term($token2),\n                            $token1\n                        ),\n                    ]\n        
        ),\n            ],\n            [\n                '-one AND -two',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 9),\n                    $token5 = new WordToken('two', 10, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new LogicalAnd(\n                            new Prohibited(\n                                new Term($token2),\n                                $token1\n                            ),\n                            new Prohibited(\n                                new Term($token5),\n                                $token4\n                            ),\n                            $token3\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '-one OR -two',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 8),\n                    $token5 = new WordToken('two', 9, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new LogicalOr(\n                            new Prohibited(\n                                new Term($token2),\n                                $token1\n                            ),\n                            new Prohibited(\n                                new Term($token5),\n                                $token4\n                            ),\n                            $token3\n                        
),\n                    ]\n                ),\n            ],\n            [\n                '-one OR -two AND -three',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 8),\n                    $token5 = new WordToken('two', 9, '', 'two'),\n                    $token6 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 13),\n                    $token7 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 17),\n                    $token8 = new WordToken('three', 18, '', 'three'),\n                ],\n                new Query(\n                    [\n                        new LogicalOr(\n                            new Prohibited(\n                                new Term($token2),\n                                $token1\n                            ),\n                            new LogicalAnd(\n                                new Prohibited(\n                                    new Term($token5),\n                                    $token4\n                                ),\n                                new Prohibited(\n                                    new Term($token8),\n                                    $token7\n                                ),\n                                $token6\n                            ),\n                            $token3\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '-one AND -two OR -three',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 5),\n                    $token4 = new 
Token(Tokenizer::TOKEN_PROHIBITED, '-', 9),\n                    $token5 = new WordToken('two', 10, '', 'two'),\n                    $token6 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 14),\n                    $token7 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 17),\n                    $token8 = new WordToken('three', 18, '', 'three'),\n                ],\n                new Query(\n                    [\n                        new LogicalOr(\n                            new LogicalAnd(\n                                new Prohibited(\n                                    new Term($token2),\n                                    $token1\n                                ),\n                                new Prohibited(\n                                    new Term($token5),\n                                    $token4\n                                ),\n                                $token3\n                            ),\n                            new Prohibited(\n                                new Term($token8),\n                                $token7\n                            ),\n                            $token6\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '-(one)',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    $token2 = new GroupBeginToken('(', 1, '(', null),\n                    $token3 = new WordToken('one', 2, '', 'one'),\n                    $token4 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 5),\n                ],\n                new Query(\n                    [\n                        new Prohibited(\n                            new Group(\n                                [\n                                    new Term($token3),\n                                ],\n                                $token2,\n                                $token4\n                            ),\n         
                   $token1\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '(one OR +two three)',\n                [\n                    $token1 = new GroupBeginToken('(', 0, '(', null),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 8),\n                    $token5 = new WordToken('two', 9, '', 'two'),\n                    $token6 = new WordToken('three', 13, '', 'three'),\n                    $token7 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 18),\n                ],\n                new Query(\n                    [\n                        new Group(\n                            [\n                                new LogicalOr(\n                                    new Term($token2),\n                                    new Mandatory(\n                                        new Term($token5),\n                                        $token4\n                                    ),\n                                    $token3\n                                ),\n                                new Term($token6),\n                            ],\n                            $token1,\n                            $token7\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '(one OR -two three)',\n                [\n                    $token1 = new GroupBeginToken('(', 0, '(', null),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 8),\n                    $token5 = new WordToken('two', 9, '', 'two'),\n                    $token6 = new WordToken('three', 13, '', 'three'),\n                 
   $token7 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 18),\n                ],\n                new Query(\n                    [\n                        new Group(\n                            [\n                                new LogicalOr(\n                                    new Term($token2),\n                                    new Prohibited(\n                                        new Term($token5),\n                                        $token4\n                                    ),\n                                    $token3\n                                ),\n                                new Term($token6),\n                            ],\n                            $token1,\n                            $token7\n                        ),\n                    ]\n                ),\n            ],\n            [\n                '((one))',\n                [\n                    $token1 = new GroupBeginToken('(', 0, '(', null),\n                    $token2 = new GroupBeginToken('(', 1, '(', null),\n                    $token3 = new WordToken('one', 2, '', 'one'),\n                    $token4 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 5),\n                    $token5 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 6),\n                ],\n                new Query(\n                    [\n                        new Group(\n                            [\n                                new Group(\n                                    [\n                                        new Term($token3),\n                                    ],\n                                    $token2,\n                                    $token4\n                                ),\n                            ],\n                            $token1,\n                            $token5\n                        ),\n                    ]\n                ),\n            ],\n            [\n                'NOT NOT one NOT NOT NOT two',\n                [\n                
    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 4),\n                    $token3 = new WordToken('one', 8, '', 'one'),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 12),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 16),\n                    $token6 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 20),\n                    $token7 = new WordToken('two', 24, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new LogicalNot(\n                            new LogicalNot(\n                                new Term($token3),\n                                $token2\n                            ),\n                            $token1\n                        ),\n                        new LogicalNot(\n                            new LogicalNot(\n                                new LogicalNot(\n                                    new Term($token7),\n                                    $token6\n                                ),\n                                $token5\n                            ),\n                            $token4\n                        ),\n                    ]\n                ),\n            ],\n            [\n                'NOT !one NOT !!two',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 4),\n                    $token3 = new WordToken('one', 5, '', 'one'),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 9),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 13),\n                    $token6 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 14),\n                    $token7 = new WordToken('two', 15, '', 'two'),\n                ],\n            
    new Query(\n                    [\n                        new LogicalNot(\n                            new LogicalNot(\n                                new Term($token3),\n                                $token2\n                            ),\n                            $token1\n                        ),\n                        new LogicalNot(\n                            new LogicalNot(\n                                new LogicalNot(\n                                    new Term($token7),\n                                    $token6\n                                ),\n                                $token5\n                            ),\n                            $token4\n                        ),\n                    ]\n                ),\n            ],\n            [\n                'one AND NOT \"two\"',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 8),\n                    $token4 = new PhraseToken('\"two\"', 12, '', '\"', 'two'),\n                ],\n                new Query(\n                    [\n                        new LogicalAnd(\n                            new Term($token1),\n                            new LogicalNot(\n                                new Term($token4),\n                                $token3\n                            ),\n                            $token2\n                        ),\n                    ]\n                ),\n            ],\n            [\n                'one AND NOT @two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 8),\n                    $token4 = new UserToken('@two', 12, '@', 'two'),\n     
           ],\n                new Query(\n                    [\n                        new LogicalAnd(\n                            new Term($token1),\n                            new LogicalNot(\n                                new Term($token4),\n                                $token3\n                            ),\n                            $token2\n                        ),\n                    ]\n                ),\n            ],\n            [\n                'one AND NOT #two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 8),\n                    $token4 = new TagToken('#two', 12, '#', 'two'),\n                ],\n                new Query(\n                    [\n                        new LogicalAnd(\n                            new Term($token1),\n                            new LogicalNot(\n                                new Term($token4),\n                                $token3\n                            ),\n                            $token2\n                        ),\n                    ]\n                ),\n            ],\n        ];\n    }\n\n    public function providerForTestQueryCorrected()\n    {\n        return [\n            [\n                'one\"',\n                'one',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_BAILOUT, '\"', 3),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_BAILOUT_TOKEN_IGNORED, $token2),\n                ],\n            ],\n            [\n                'one AND two AND',\n                'one AND two',\n               
 [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 4),\n                    $token3 = new WordToken('two', 8, '', 'two'),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 12),\n                ],\n                new Query(\n                    [\n                        new LogicalAnd(\n                            new Term($token1),\n                            new Term($token3),\n                            $token2\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_RIGHT_OPERAND_IGNORED, $token4),\n                ],\n            ],\n            [\n                'AND one AND two',\n                'one AND two',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 0),\n                    $token2 = new WordToken('one', 4, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 8),\n                    $token4 = new WordToken('two', 12, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new LogicalAnd(\n                            new Term($token2),\n                            new Term($token4),\n                            $token3\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token1),\n                ],\n            ],\n            [\n                'AND AND one AND AND two',\n                'one two',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 4),\n                    $token3 = new WordToken('one', 8, '', 
'one'),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 12),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 16),\n                    $token6 = new WordToken('two', 20, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token3),\n                        new Term($token6),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token1),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token2),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED, $token4, $token5),\n                ],\n            ],\n            [\n                'OR one OR two',\n                'one OR two',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 0),\n                    $token2 = new WordToken('one', 3, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 7),\n                    $token4 = new WordToken('two', 10, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new LogicalOr(\n                            new Term($token2),\n                            new Term($token4),\n                            $token3\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token1),\n                ],\n            ],\n            [\n                'OR OR one OR OR two',\n                'one two',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 3),\n                    
$token3 = new WordToken('one', 6, '', 'one'),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 10),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 13),\n                    $token6 = new WordToken('two', 16, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token3),\n                        new Term($token6),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token1),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token2),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED, $token4, $token5),\n                ],\n            ],\n            [\n                'OR OR one OR OR AND two',\n                'one two',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 3),\n                    $token3 = new WordToken('one', 6, '', 'one'),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 10),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 13),\n                    $token6 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 16),\n                    $token7 = new WordToken('two', 20, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token3),\n                        new Term($token7),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token1),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token2),\n                    new 
Correction(Parser::CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED, $token4, $token5, $token6),\n                ],\n            ],\n            [\n                'one OR two AND OR NOT',\n                'one OR two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 4),\n                    $token3 = new WordToken('two', 7, '', 'two'),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 11),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 15),\n                    $token6 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 18),\n                ],\n                new Query(\n                    [\n                        new LogicalOr(\n                            new Term($token1),\n                            new Term($token3),\n                            $token2\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED, $token4, $token5),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token6),\n                ],\n            ],\n            [\n                'AND OR one AND OR two AND OR three',\n                'one two three',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 4),\n                    $token3 = new WordToken('one', 7, '', 'one'),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 11),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 15),\n                    $token6 = new WordToken('two', 18, '', 'two'),\n                    $token7 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 22),\n                    $token8 = 
new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 26),\n                    $token9 = new WordToken('three', 29, '', 'three'),\n                ],\n                new Query(\n                    [\n                        new Term($token3),\n                        new Term($token6),\n                        new Term($token9),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token1),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token2),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED, $token4, $token5),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED, $token7, $token8),\n                ],\n            ],\n            [\n                'OR AND one OR AND two OR AND three',\n                'one two three',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 3),\n                    $token3 = new WordToken('one', 7, '', 'one'),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 11),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 14),\n                    $token6 = new WordToken('two', 18, '', 'two'),\n                    $token7 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 22),\n                    $token8 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 25),\n                    $token9 = new WordToken('three', 29, '', 'three'),\n                ],\n                new Query(\n                    [\n                        new Term($token3),\n                        new Term($token6),\n                        new Term($token9),\n                    ]\n                ),\n                [\n                    new 
Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token1),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token2),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED, $token4, $token5),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED, $token7, $token8),\n                ],\n            ],\n            [\n                'one AND NOT AND two',\n                'one two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 8),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 12),\n                    $token5 = new WordToken('two', 16, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new Term($token5),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED, $token2, $token3, $token4),\n                ],\n            ],\n            [\n                'one NOT AND two',\n                'one two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 8),\n                    $token4 = new WordToken('two', 12, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new Term($token4),\n                    ]\n                ),\n                [\n                    new 
Correction(Parser::CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED, $token2, $token3),\n                ],\n            ],\n            [\n                'one NOT AND NOT two',\n                'one NOT two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 8),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 12),\n                    $token5 = new WordToken('two', 16, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new LogicalNot(\n                            new Term($token5),\n                            $token4\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED, $token2, $token3),\n                ],\n            ],\n            [\n                'one OR NOT OR two',\n                'one two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 7),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 11),\n                    $token5 = new WordToken('two', 14, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new Term($token5),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED, $token2, $token3, $token4),\n                ],\n            ],\n            [\n                
'one NOT OR two',\n                'one two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 8),\n                    $token4 = new WordToken('two', 11, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new Term($token4),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED, $token2, $token3),\n                ],\n            ],\n            [\n                'one NOT OR NOT two',\n                'one NOT two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 8),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 11),\n                    $token5 = new WordToken('two', 15, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new LogicalNot(\n                            new Term($token5),\n                            $token4\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED, $token2, $token3),\n                ],\n            ],\n            [\n                '(one AND two OR NOT)',\n                '(one AND two)',\n                [\n                    $token1 = new GroupBeginToken('(', 0, '(', null),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new 
Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 5),\n                    $token4 = new WordToken('two', 9, '', 'two'),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 13),\n                    $token6 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 16),\n                    $token7 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 19),\n                ],\n                new Query(\n                    [\n                        new Group(\n                            [\n                                new LogicalAnd(\n                                    new Term($token2),\n                                    new Term($token4),\n                                    $token3\n                                ),\n                            ],\n                            $token1,\n                            $token7\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token6),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_RIGHT_OPERAND_IGNORED, $token5),\n                ],\n            ],\n            [\n                '(AND one OR two)',\n                '(one OR two)',\n                [\n                    $token1 = new GroupBeginToken('(', 0, '(', null),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 1),\n                    $token3 = new WordToken('one', 5, '', 'one'),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 9),\n                    $token5 = new WordToken('two', 12, '', 'two'),\n                    $token6 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 15),\n                ],\n                new Query(\n                    [\n                        new Group(\n                            [\n                                new LogicalOr(\n                                    new Term($token3),\n                   
                 new Term($token5),\n                                    $token4\n                                ),\n                            ],\n                            $token1,\n                            $token6\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token2),\n                ],\n            ],\n            [\n                'AND (((OR AND one AND NOT OR))) OR NOT',\n                '(((one)))',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 0),\n                    $token2 = new GroupBeginToken('(', 4, '(', null),\n                    $token3 = new GroupBeginToken('(', 5, '(', null),\n                    $token4 = new GroupBeginToken('(', 6, '(', null),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 7),\n                    $token6 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 10),\n                    $token7 = new WordToken('one', 14, '', 'one'),\n                    $token8 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 18),\n                    $token9 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 22),\n                    $token10 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 26),\n                    $token11 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 28),\n                    $token12 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 29),\n                    $token13 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 30),\n                    $token14 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 32),\n                    $token15 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 35),\n                ],\n                new Query(\n                    [\n                        new Group(\n                            [\n                                new Group(\n                                    [\n              
                          new Group(\n                                            [\n                                                new Term($token7),\n                                            ],\n                                            $token4,\n                                            $token11\n                                        ),\n                                    ],\n                                    $token3,\n                                    $token12\n                                ),\n                            ],\n                            $token2,\n                            $token13\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token1),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token5),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token6),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED, $token8, $token9, $token10),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token15),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_RIGHT_OPERAND_IGNORED, $token14),\n                ],\n            ],\n            [\n                'one ()',\n                'one',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new GroupBeginToken('(', 4, '(', null),\n                    $token3 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 5),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token2, 
$token3),\n                ],\n            ],\n            [\n                'one (())',\n                'one',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new GroupBeginToken('(', 4, '(', null),\n                    $token3 = new GroupBeginToken('(', 5, '(', null),\n                    $token4 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 6),\n                    $token5 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 7),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token3, $token4),\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token2, $token5),\n                ],\n            ],\n            [\n                'one AND (()) OR two',\n                'one two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 4),\n                    $token3 = new GroupBeginToken('(', 8, '(', null),\n                    $token4 = new GroupBeginToken('(', 9, '(', null),\n                    $token5 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 10),\n                    $token6 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 11),\n                    $token7 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 13),\n                    $token8 = new WordToken('two', 16, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new Term($token8),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token4, $token5),\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, 
$token2, $token3, $token6, $token7),\n                ],\n            ],\n            [\n                'one (AND OR NOT)',\n                'one',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new GroupBeginToken('(', 4, '(', null),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 9),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 12),\n                    $token6 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 15),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token3),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token4),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token5),\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token2, $token6),\n                ],\n            ],\n            [\n                'one) (AND)) OR NOT)',\n                'one',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 3),\n                    $token3 = new GroupBeginToken('(', 5, '(', null),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 6),\n                    $token5 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 9),\n                    $token6 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 10),\n                    $token7 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 12),\n                    $token8 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 15),\n                 
   $token9 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 18),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNMATCHED_GROUP_RIGHT_DELIMITER_IGNORED, $token9),\n                    new Correction(Parser::CORRECTION_UNMATCHED_GROUP_RIGHT_DELIMITER_IGNORED, $token6),\n                    new Correction(Parser::CORRECTION_UNMATCHED_GROUP_RIGHT_DELIMITER_IGNORED, $token2),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token4),\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token3, $token5, $token7),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token8),\n                ],\n            ],\n            [\n                '(one( (AND) OR NOT((',\n                'one',\n                [\n                    $token1 = new GroupBeginToken('(', 0, '(', null),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new GroupBeginToken('(', 4, '(', null),\n                    $token4 = new GroupBeginToken('(', 6, '(', null),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 7),\n                    $token6 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 10),\n                    $token7 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 12),\n                    $token8 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 15),\n                    $token9 = new GroupBeginToken('(', 18, '(', null),\n                    $token10 = new GroupBeginToken('(', 19, '(', null),\n                ],\n                new Query(\n                    [\n                        new Term($token2),\n                    ]\n                ),\n                [\n                    new 
Correction(Parser::CORRECTION_UNMATCHED_GROUP_LEFT_DELIMITER_IGNORED, $token10),\n                    new Correction(Parser::CORRECTION_UNMATCHED_GROUP_LEFT_DELIMITER_IGNORED, $token9),\n                    new Correction(Parser::CORRECTION_UNMATCHED_GROUP_LEFT_DELIMITER_IGNORED, $token3),\n                    new Correction(Parser::CORRECTION_UNMATCHED_GROUP_LEFT_DELIMITER_IGNORED, $token1),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token5),\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token4, $token6, $token7),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token8),\n                ],\n            ],\n            [\n                'OR NOT (one OR two AND OR NOT) OR three AND NOT',\n                'NOT (one OR two) OR three',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 3),\n                    $token3 = new GroupBeginToken('(', 7, '(', null),\n                    $token4 = new WordToken('one', 8, '', 'one'),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 12),\n                    $token6 = new WordToken('two', 15, '', 'two'),\n                    $token7 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 19),\n                    $token8 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 23),\n                    $token9 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 26),\n                    $token10 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 29),\n                    $token11 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 31),\n                    $token12 = new WordToken('three', 34, '', 'three'),\n                    $token13 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 40),\n                    $token14 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 
44),\n                ],\n                new Query(\n                    [\n                        new LogicalOr(\n                            new LogicalNot(\n                                new Group(\n                                    [\n                                        new LogicalOr(\n                                            new Term($token4),\n                                            new Term($token6),\n                                            $token5\n                                        ),\n                                    ],\n                                    $token3,\n                                    $token10\n                                ),\n                                $token2\n                            ),\n                            new Term($token12),\n                            $token11\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token1),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_FOLLOWING_OPERATOR_IGNORED, $token7, $token8),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token9),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token14),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_RIGHT_OPERAND_IGNORED, $token13),\n                ],\n            ],\n            [\n                '+ one',\n                'one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new WordToken('one', 2, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Term($token2),\n                    ]\n                ),\n                [\n                    new 
Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token1),\n                ],\n            ],\n            [\n                '! one',\n                'one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 0),\n                    $token2 = new WordToken('one', 2, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Term($token2),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token1),\n                ],\n            ],\n            [\n                '+++one ++two',\n                '+one +two',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 1),\n                    $token3 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 2),\n                    $token4 = new WordToken('one', 3, '', 'one'),\n                    $token5 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 7),\n                    $token6 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 8),\n                    $token7 = new WordToken('two', 9, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Mandatory(\n                            new Term($token4),\n                            $token3\n                        ),\n                        new Mandatory(\n                            new Term($token7),\n                            $token6\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token1),\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token2),\n                    new 
Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token5),\n                ],\n            ],\n            [\n                '+one + +AND +++ two',\n                '+one AND two',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 7),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 8),\n                    $token6 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 12),\n                    $token7 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 13),\n                    $token8 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 14),\n                    $token9 = new WordToken('two', 16, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new LogicalAnd(\n                            new Mandatory(\n                                new Term($token2),\n                                $token1\n                            ),\n                            new Term($token9),\n                            $token5\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token3),\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token4),\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token6),\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token7),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token8),\n                ],\n            ],\n            [\n        
        '+one + +OR++ +two ++ +',\n                '+one OR +two',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 7),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 8),\n                    $token6 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 10),\n                    $token7 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 11),\n                    $token8 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 13),\n                    $token9 = new WordToken('two', 14, '', 'two'),\n                    $token10 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 18),\n                    $token11 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 19),\n                    $token12 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 21),\n                ],\n                new Query(\n                    [\n                        new LogicalOr(\n                            new Mandatory(\n                                new Term($token2),\n                                $token1\n                            ),\n                            new Mandatory(\n                                new Term($token9),\n                                $token8\n                            ),\n                            $token5\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token3),\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token4),\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token6),\n                    new 
Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token7),\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token10),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token11),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token12),\n                ],\n            ],\n            [\n                'NOT +one',\n                '+one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 4),\n                    $token3 = new WordToken('one', 5, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Mandatory(\n                            new Term($token3),\n                            $token2\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_LOGICAL_NOT_OPERATORS_PRECEDING_PREFERENCE_IGNORED, $token1),\n                ],\n            ],\n            [\n                '+(+one + +OR++ +two ++ +)',\n                '+(+one OR +two)',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new GroupBeginToken('(', 1, '(', null),\n                    $token3 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 2),\n                    $token4 = new WordToken('one', 3, '', 'one'),\n                    $token5 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 7),\n                    $token6 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 9),\n                    $token7 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 10),\n                    $token8 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 12),\n                    $token9 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 
13),\n                    $token10 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 15),\n                    $token11 = new WordToken('two', 16, '', 'two'),\n                    $token12 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 20),\n                    $token13 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 21),\n                    $token14 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 23),\n                    $token15 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 24),\n                ],\n                new Query(\n                    [\n                        new Mandatory(\n                            new Group(\n                                [\n                                    new LogicalOr(\n                                        new Mandatory(\n                                            new Term($token4),\n                                            $token3\n                                        ),\n                                        new Mandatory(\n                                            new Term($token11),\n                                            $token10\n                                        ),\n                                        $token7\n                                    ),\n                                ],\n                                $token2,\n                                $token15\n                            ),\n                            $token1\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token5),\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token6),\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token8),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token9),\n                    new 
Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token12),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token13),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token14),\n                ],\n            ],\n            [\n                '- one',\n                'one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    $token2 = new WordToken('one', 2, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Term($token2),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token1),\n                ],\n            ],\n            [\n                '---one --two',\n                '-one -two',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 1),\n                    $token3 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 2),\n                    $token4 = new WordToken('one', 3, '', 'one'),\n                    $token5 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 7),\n                    $token6 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 8),\n                    $token7 = new WordToken('two', 9, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Prohibited(\n                            new Term($token4),\n                            $token3\n                        ),\n                        new Prohibited(\n                            new Term($token7),\n                            $token6\n                        ),\n                    ]\n                ),\n                [\n                    new 
Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token1),\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token2),\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token5),\n                ],\n            ],\n            [\n                '-one - -AND --- two',\n                '-one AND two',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 7),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 8),\n                    $token6 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 12),\n                    $token7 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 13),\n                    $token8 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 14),\n                    $token9 = new WordToken('two', 16, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new LogicalAnd(\n                            new Prohibited(\n                                new Term($token2),\n                                $token1\n                            ),\n                            new Term($token9),\n                            $token5\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token3),\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token4),\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token6),\n                    new 
Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token7),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token8),\n                ],\n            ],\n            [\n                '-one - -OR-- -two -- -',\n                '-one OR -two',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 7),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 8),\n                    $token6 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 10),\n                    $token7 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 11),\n                    $token8 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 13),\n                    $token9 = new WordToken('two', 14, '', 'two'),\n                    $token10 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 18),\n                    $token11 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 19),\n                    $token12 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 21),\n                ],\n                new Query(\n                    [\n                        new LogicalOr(\n                            new Prohibited(\n                                new Term($token2),\n                                $token1\n                            ),\n                            new Prohibited(\n                                new Term($token9),\n                                $token8\n                            ),\n                            $token5\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token3),\n                    new 
Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token4),\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token6),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token7),\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token10),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token11),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token12),\n                ],\n            ],\n            [\n                'NOT -one',\n                '-one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 4),\n                    $token3 = new WordToken('one', 5, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Prohibited(\n                            new Term($token3),\n                            $token2\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_LOGICAL_NOT_OPERATORS_PRECEDING_PREFERENCE_IGNORED, $token1),\n                ],\n            ],\n            [\n                '-(-one - -OR-- -two --)-',\n                '-(-one OR -two)',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    $token2 = new GroupBeginToken('(', 1, '(', null),\n                    $token3 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 2),\n                    $token4 = new WordToken('one', 3, '', 'one'),\n                    $token5 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 7),\n                    $token6 = new Token(Tokenizer::TOKEN_PROHIBITED, 
'-', 9),\n                    $token7 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 10),\n                    $token8 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 12),\n                    $token9 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 13),\n                    $token10 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 15),\n                    $token11 = new WordToken('two', 16, '', 'two'),\n                    $token12 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 20),\n                    $token13 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 21),\n                    $token15 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 22),\n                    $token14 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 23),\n                ],\n                new Query(\n                    [\n                        new Prohibited(\n                            new Group(\n                                [\n                                    new LogicalOr(\n                                        new Prohibited(\n                                            new Term($token4),\n                                            $token3\n                                        ),\n                                        new Prohibited(\n                                            new Term($token11),\n                                            $token10\n                                        ),\n                                        $token7\n                                    ),\n                                ],\n                                $token2,\n                                $token15\n                            ),\n                            $token1\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token5),\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token6),\n         
           new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token8),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token9),\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token12),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token13),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token14),\n                ],\n            ],\n            [\n                '+NOT one',\n                'NOT one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 1),\n                    $token3 = new WordToken('one', 5, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new LogicalNot(\n                            new Term($token3),\n                            $token2\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token1),\n                ],\n            ],\n            [\n                '+AND one',\n                'one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 1),\n                    $token3 = new WordToken('one', 5, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Term($token3),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token1),\n                    new 
Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token2),\n                ],\n            ],\n            [\n                '+OR one',\n                'one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 1),\n                    $token3 = new WordToken('one', 4, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Term($token3),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token1),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token2),\n                ],\n            ],\n            [\n                '-NOT one',\n                'NOT one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 1),\n                    $token3 = new WordToken('one', 5, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new LogicalNot(\n                            new Term($token3),\n                            $token2\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token1),\n                ],\n            ],\n            [\n                '-AND one',\n                'one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 1),\n                    $token3 = new WordToken('one', 5, '', 'one'),\n                ],\n                new Query(\n   
                 [\n                        new Term($token3),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token1),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token2),\n                ],\n            ],\n            [\n                '-OR one',\n                'one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 1),\n                    $token3 = new WordToken('one', 4, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Term($token3),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token1),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_LEFT_OPERAND_IGNORED, $token2),\n                ],\n            ],\n            [\n                'NOT (one',\n                'NOT one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new GroupBeginToken('(', 4, '(', null),\n                    $token3 = new WordToken('one', 5, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new LogicalNot(\n                            new Term($token3),\n                            $token1\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNMATCHED_GROUP_LEFT_DELIMITER_IGNORED, $token2),\n                ],\n            ],\n            [\n                'NOT (one two',\n                'NOT one two',\n                [\n                   
 $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new GroupBeginToken('(', 4, '(', null),\n                    $token3 = new WordToken('one', 5, '', 'one'),\n                    $token4 = new WordToken('two', 9, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new LogicalNot(\n                            new Term($token3),\n                            $token1\n                        ),\n                        new Term($token4),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNMATCHED_GROUP_LEFT_DELIMITER_IGNORED, $token2),\n                ],\n            ],\n            [\n                '-(one',\n                '-one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    $token2 = new GroupBeginToken('(', 1, '(', null),\n                    $token3 = new WordToken('one', 2, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Prohibited(\n                            new Term($token3),\n                            $token1\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNMATCHED_GROUP_LEFT_DELIMITER_IGNORED, $token2),\n                ],\n            ],\n            [\n                '-(one two',\n                '-one two',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    $token2 = new GroupBeginToken('(', 1, '(', null),\n                    $token3 = new WordToken('one', 2, '', 'one'),\n                    $token4 = new WordToken('two', 6, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Prohibited(\n                            new 
Term($token3),\n                            $token1\n                        ),\n                        new Term($token4),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNMATCHED_GROUP_LEFT_DELIMITER_IGNORED, $token2),\n                ],\n            ],\n            [\n                '+(one',\n                '+one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new GroupBeginToken('(', 1, '(', null),\n                    $token3 = new WordToken('one', 2, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Mandatory(\n                            new Term($token3),\n                            $token1\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNMATCHED_GROUP_LEFT_DELIMITER_IGNORED, $token2),\n                ],\n            ],\n            [\n                '+(one two',\n                '+one two',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new GroupBeginToken('(', 1, '(', null),\n                    $token3 = new WordToken('one', 2, '', 'one'),\n                    $token4 = new WordToken('two', 6, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Mandatory(\n                            new Term($token3),\n                            $token1\n                        ),\n                        new Term($token4),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNMATCHED_GROUP_LEFT_DELIMITER_IGNORED, $token2),\n                ],\n            ],\n            [\n                '-(one +(two NOT (three',\n                '-one +two NOT 
three',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    $token2 = new GroupBeginToken('(', 1, '(', null),\n                    $token3 = new WordToken('one', 2, '', 'one'),\n                    $token4 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 6),\n                    $token5 = new GroupBeginToken('(', 7, '(', null),\n                    $token6 = new WordToken('two', 8, '', 'two'),\n                    $token7 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 12),\n                    $token8 = new GroupBeginToken('(', 16, '(', null),\n                    $token9 = new WordToken('three', 17, '', 'three'),\n                ],\n                new Query(\n                    [\n                        new Prohibited(\n                            new Term($token3),\n                            $token1\n                        ),\n                        new Mandatory(\n                            new Term($token6),\n                            $token4\n                        ),\n                        new LogicalNot(\n                            new Term($token9),\n                            $token7\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNMATCHED_GROUP_LEFT_DELIMITER_IGNORED, $token8),\n                    new Correction(Parser::CORRECTION_UNMATCHED_GROUP_LEFT_DELIMITER_IGNORED, $token5),\n                    new Correction(Parser::CORRECTION_UNMATCHED_GROUP_LEFT_DELIMITER_IGNORED, $token2),\n                ],\n            ],\n            [\n                'one AND NOT (two',\n                'one AND NOT two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 8),\n                    $token4 
= new GroupBeginToken('(', 12, '(', null),\n                    $token5 = new WordToken('two', 13, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new LogicalAnd(\n                            new Term($token1),\n                            new LogicalNot(\n                                new Term($token5),\n                                $token3\n                            ),\n                            $token2\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNMATCHED_GROUP_LEFT_DELIMITER_IGNORED, $token4),\n                ],\n            ],\n            [\n                '(one OR two AND) AND',\n                '(one OR two)',\n                [\n                    $token1 = new GroupBeginToken('(', 0, '(', null),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 5),\n                    $token4 = new WordToken('two', 8, '', 'two'),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 12),\n                    $token6 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 15),\n                    $token7 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 17),\n                ],\n                new Query(\n                    [\n                        new Group(\n                            [\n                                new LogicalOr(\n                                    new Term($token2),\n                                    new Term($token4),\n                                    $token3\n                                ),\n                            ],\n                            $token1,\n                            $token6\n                        ),\n                    ]\n                ),\n                [\n                    new 
Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_RIGHT_OPERAND_IGNORED, $token5),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_RIGHT_OPERAND_IGNORED, $token7),\n                ],\n            ],\n            [\n                '(one AND NOT +two)',\n                '(one AND +two)',\n                [\n                    $token1 = new GroupBeginToken('(', 0, '(', null),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 9),\n                    $token5 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 13),\n                    $token6 = new WordToken('two', 14, '', 'two'),\n                    $token7 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 17),\n                ],\n                new Query(\n                    [\n                        new Group(\n                            [\n                                new LogicalAnd(\n                                    new Term($token2),\n                                    new Mandatory(\n                                        new Term($token6),\n                                        $token5\n                                    ),\n                                    $token3\n                                ),\n                            ],\n                            $token1,\n                            $token7\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_LOGICAL_NOT_OPERATORS_PRECEDING_PREFERENCE_IGNORED, $token4),\n                ],\n            ],\n            [\n                '(one AND NOT -two)',\n                '(one AND -two)',\n                [\n                    $token1 = new GroupBeginToken('(', 0, '(', null),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n       
             $token3 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 9),\n                    $token5 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 13),\n                    $token6 = new WordToken('two', 14, '', 'two'),\n                    $token7 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 17),\n                ],\n                new Query(\n                    [\n                        new Group(\n                            [\n                                new LogicalAnd(\n                                    new Term($token2),\n                                    new Prohibited(\n                                        new Term($token6),\n                                        $token5\n                                    ),\n                                    $token3\n                                ),\n                            ],\n                            $token1,\n                            $token7\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_LOGICAL_NOT_OPERATORS_PRECEDING_PREFERENCE_IGNORED, $token4),\n                ],\n            ],\n            [\n                '(one AND NOT -two three)',\n                '(one AND -two three)',\n                [\n                    $token1 = new GroupBeginToken('(', 0, '(', null),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 9),\n                    $token5 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 13),\n                    $token6 = new WordToken('two', 14, '', 'two'),\n                    $token7 = new WordToken('three', 18, '', 'three'),\n                    $token8 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 23),\n                
],\n                new Query(\n                    [\n                        new Group(\n                            [\n                                new LogicalAnd(\n                                    new Term($token2),\n                                    new Prohibited(\n                                        new Term($token6),\n                                        $token5\n                                    ),\n                                    $token3\n                                ),\n                                new Term($token7),\n                            ],\n                            $token1,\n                            $token8\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_LOGICAL_NOT_OPERATORS_PRECEDING_PREFERENCE_IGNORED, $token4),\n                ],\n            ],\n            [\n                '(one AND NOT +two three)',\n                '(one AND +two three)',\n                [\n                    $token1 = new GroupBeginToken('(', 0, '(', null),\n                    $token2 = new WordToken('one', 1, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 9),\n                    $token5 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 13),\n                    $token6 = new WordToken('two', 14, '', 'two'),\n                    $token7 = new WordToken('three', 18, '', 'three'),\n                    $token8 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 23),\n                ],\n                new Query(\n                    [\n                        new Group(\n                            [\n                                new LogicalAnd(\n                                    new Term($token2),\n                                    new Mandatory(\n                                        new Term($token6),\n             
                           $token5\n                                    ),\n                                    $token3\n                                ),\n                                new Term($token7),\n                            ],\n                            $token1,\n                            $token8\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_LOGICAL_NOT_OPERATORS_PRECEDING_PREFERENCE_IGNORED, $token4),\n                ],\n            ],\n            [\n                '+()+one',\n                '+one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new GroupBeginToken('(', 1, '(', null),\n                    $token3 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 2),\n                    $token4 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 3),\n                    $token5 = new WordToken('one', 4, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Mandatory(\n                            new Term($token5),\n                            $token4\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token1, $token2, $token3),\n                ],\n            ],\n            [\n                '+()!one',\n                '!one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new GroupBeginToken('(', 1, '(', null),\n                    $token3 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 2),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 3),\n                    $token5 = new WordToken('one', 4, '', 'one'),\n                ],\n                new Query(\n                    [\n                      
  new LogicalNot(\n                            new Term($token5),\n                            $token4\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token1, $token2, $token3),\n                ],\n            ],\n            [\n                'one AND +()!two',\n                'one !two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 8),\n                    $token4 = new GroupBeginToken('(', 9, '(', null),\n                    $token5 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 10),\n                    $token6 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 11),\n                    $token7 = new WordToken('two', 12, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new LogicalNot(\n                            new Term($token7),\n                            $token6\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token2, $token3, $token4, $token5),\n                ],\n            ],\n            [\n                'NOT +()!one',\n                '!one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 4),\n                    $token3 = new GroupBeginToken('(', 5, '(', null),\n                    $token4 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 6),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 7),\n                    $token6 = new WordToken('one', 8, '', 'one'),\n          
      ],\n                new Query(\n                    [\n                        new LogicalNot(\n                            new Term($token6),\n                            $token5\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token1, $token2, $token3, $token4),\n                ],\n            ],\n            [\n                'NOT -()!one',\n                '!one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 4),\n                    $token3 = new GroupBeginToken('(', 5, '(', null),\n                    $token4 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 6),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 7),\n                    $token6 = new WordToken('one', 8, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new LogicalNot(\n                            new Term($token6),\n                            $token5\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token1, $token2, $token3, $token4),\n                ],\n            ],\n            [\n                'NOT ++()!one',\n                '!one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 5),\n                    $token4 = new GroupBeginToken('(', 6, '(', null),\n                    $token5 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 7),\n                    $token6 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 8),\n                    
$token7 = new WordToken('one', 9, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new LogicalNot(\n                            new Term($token7),\n                            $token6\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token2),\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token1, $token3, $token4, $token5),\n                ],\n            ],\n            [\n                'NOT -+()!one',\n                '!one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 5),\n                    $token4 = new GroupBeginToken('(', 6, '(', null),\n                    $token5 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 7),\n                    $token6 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 8),\n                    $token7 = new WordToken('one', 9, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new LogicalNot(\n                            new Term($token7),\n                            $token6\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token2),\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token1, $token3, $token4, $token5),\n                ],\n            ],\n            [\n                'NOT !()!one',\n                '!one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new 
Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 4),\n                    $token3 = new GroupBeginToken('(', 5, '(', null),\n                    $token4 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 6),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 7),\n                    $token6 = new WordToken('one', 8, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new LogicalNot(\n                            new Term($token6),\n                            $token5\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token1, $token2, $token3, $token4),\n                ],\n            ],\n            [\n                'NOT +()+()!one',\n                '!one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 4),\n                    $token3 = new GroupBeginToken('(', 5, '(', null),\n                    $token4 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 6),\n                    $token5 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 7),\n                    $token6 = new GroupBeginToken('(', 8, '(', null),\n                    $token7 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 9),\n                    $token8 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 10),\n                    $token9 = new WordToken('one', 11, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new LogicalNot(\n                            new Term($token9),\n                            $token8\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token1, $token2, $token3, $token4),\n                    new 
Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token5, $token6, $token7),\n                ],\n            ],\n            [\n                'NOT NOT +()+()!one',\n                '!one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 8),\n                    $token4 = new GroupBeginToken('(', 9, '(', null),\n                    $token5 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 10),\n                    $token6 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 11),\n                    $token7 = new GroupBeginToken('(', 12, '(', null),\n                    $token8 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 13),\n                    $token9 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 14),\n                    $token10 = new WordToken('one', 15, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new LogicalNot(\n                            new Term($token10),\n                            $token9\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token1, $token2, $token3, $token4, $token5),\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token6, $token7, $token8),\n                ],\n            ],\n            [\n                'one AND NOT +()+()!two',\n                'one !two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 8),\n                    $token4 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 12),\n                    $token5 = new 
GroupBeginToken('(', 13, '(', null),\n                    $token6 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 14),\n                    $token7 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 15),\n                    $token8 = new GroupBeginToken('(', 16, '(', null),\n                    $token9 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 17),\n                    $token10 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 18),\n                    $token11 = new WordToken('two', 19, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new LogicalNot(\n                            new Term($token11),\n                            $token10\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token2, $token3, $token4, $token5, $token6),\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token7, $token8, $token9),\n                ],\n            ],\n            [\n                'one AND NOT NOT +()+()!two',\n                'one !two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 8),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 12),\n                    $token5 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 16),\n                    $token6 = new GroupBeginToken('(', 17, '(', null),\n                    $token7 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 18),\n                    $token8 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 19),\n                    $token9 = new GroupBeginToken('(', 20, '(', null),\n                    $token10 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 21),\n                    
$token11 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 22),\n                    $token12 = new WordToken('two', 23, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new LogicalNot(\n                            new Term($token12),\n                            $token11\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token2, $token3, $token4, $token5, $token6, $token7),\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token8, $token9, $token10),\n                ],\n            ],\n            [\n                'one -() +() two',\n                'one two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 4),\n                    $token3 = new GroupBeginToken('(', 5, '(', null),\n                    $token4 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 6),\n                    $token5 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 8),\n                    $token6 = new GroupBeginToken('(', 9, '(', null),\n                    $token7 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 10),\n                    $token8 = new WordToken('two', 12, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new Term($token8),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token2, $token3, $token4),\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token5, $token6, $token7),\n                ],\n            ],\n            [\n                'one !+ two',\n                'one two',\n                [\n               
     $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 5),\n                    $token4 = new WordToken('two', 7, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new Term($token4),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token2),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token3),\n                ],\n            ],\n            [\n                'one +! two',\n                'one two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token3 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 4),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 5),\n                    $token4 = new WordToken('two', 7, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new Term($token4),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token3),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token2),\n                ],\n            ],\n            [\n                'one !- two',\n                'one two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 5),\n                    $token4 = new WordToken('two', 7, 
'', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new Term($token4),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token2),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token3),\n                ],\n            ],\n            [\n                'one !AND two',\n                'one AND two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 5),\n                    $token4 = new WordToken('two', 9, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new LogicalAnd(\n                            new Term($token1),\n                            new Term($token4),\n                            $token3\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token2),\n                ],\n            ],\n            [\n                'one !OR two',\n                'one OR two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 5),\n                    $token4 = new WordToken('two', 8, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new LogicalOr(\n                            new Term($token1),\n                            new Term($token4),\n                       
     $token3\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token2),\n                ],\n            ],\n            [\n                'one +! two',\n                'one two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 5),\n                    $token4 = new WordToken('two', 7, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new Term($token4),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token2),\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token3),\n                ],\n            ],\n            [\n                'NOT+ one',\n                'NOT one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 3),\n                    $token3 = new WordToken('one', 5, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new LogicalNot(\n                            new Term($token3),\n                            $token1\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token2),\n                ],\n            ],\n            [\n                'NOT- one',\n                'NOT one',\n                [\n                    $token1 = new 
Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 3),\n                    $token3 = new WordToken('one', 5, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new LogicalNot(\n                            new Term($token3),\n                            $token1\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_UNARY_OPERATOR_MISSING_OPERAND_IGNORED, $token2),\n                ],\n            ],\n            [\n                'NOT+one',\n                '+one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 3),\n                    $token3 = new WordToken('one', 4, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Mandatory(\n                            new Term($token3),\n                            $token2\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_LOGICAL_NOT_OPERATORS_PRECEDING_PREFERENCE_IGNORED, $token1),\n                ],\n            ],\n            [\n                '+()NOT one',\n                'NOT one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new GroupBeginToken('(', 1, '(', null),\n                    $token3 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 2),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 3),\n                    $token5 = new WordToken('one', 7, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new LogicalNot(\n                            new 
Term($token5),\n                            $token4\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token1, $token2, $token3),\n                ],\n            ],\n            [\n                '-()NOT one',\n                'NOT one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    $token2 = new GroupBeginToken('(', 1, '(', null),\n                    $token3 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 2),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 3),\n                    $token5 = new WordToken('one', 7, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new LogicalNot(\n                            new Term($token5),\n                            $token4\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token1, $token2, $token3),\n                ],\n            ],\n            [\n                '+()NOT+()one',\n                'one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    $token2 = new GroupBeginToken('(', 1, '(', null),\n                    $token3 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 2),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 3),\n                    $token5 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 6),\n                    $token6 = new GroupBeginToken('(', 7, '(', null),\n                    $token7 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 8),\n                    $token8 = new WordToken('one', 9, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Term($token8),\n           
         ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token1, $token2, $token3),\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token4, $token5, $token6, $token7),\n                ],\n            ],\n            [\n                'NOT()+one',\n                '+one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new GroupBeginToken('(', 3, '(', null),\n                    $token3 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 4),\n                    $token4 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 5),\n                    $token5 = new WordToken('one', 6, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Mandatory(\n                            new Term($token5),\n                            $token4\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token1, $token2, $token3),\n                ],\n            ],\n            [\n                'NOT () NOT one',\n                'NOT one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new GroupBeginToken('(', 4, '(', null),\n                    $token3 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 7),\n                    $token5 = new WordToken('one', 11, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new LogicalNot(\n                            new Term($token5),\n                            $token4\n                        ),\n                    ]\n                ),\n                [\n                    new 
Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token1, $token2, $token3),\n                ],\n            ],\n            [\n                'NOT () +one',\n                '+one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new GroupBeginToken('(', 4, '(', null),\n                    $token3 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 5),\n                    $token4 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 7),\n                    $token5 = new WordToken('one', 8, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Mandatory(\n                            new Term($token5),\n                            $token4\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token1, $token2, $token3),\n                ],\n            ],\n            [\n                'NOT +()NOT +one',\n                '+one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 4),\n                    $token3 = new GroupBeginToken('(', 5, '(', null),\n                    $token4 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 6),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 7),\n                    $token6 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 11),\n                    $token7 = new WordToken('one', 12, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Mandatory(\n                            new Term($token7),\n                            $token6\n                        ),\n                    ]\n                ),\n                [\n                    new 
Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token1, $token2, $token3, $token4),\n                    new Correction(Parser::CORRECTION_LOGICAL_NOT_OPERATORS_PRECEDING_PREFERENCE_IGNORED, $token5),\n                ],\n            ],\n            [\n                'NOT +() NOT +one',\n                '+one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 4),\n                    $token3 = new GroupBeginToken('(', 5, '(', null),\n                    $token4 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 6),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 8),\n                    $token6 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 12),\n                    $token7 = new WordToken('one', 13, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Mandatory(\n                            new Term($token7),\n                            $token6\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token1, $token2, $token3, $token4),\n                    new Correction(Parser::CORRECTION_LOGICAL_NOT_OPERATORS_PRECEDING_PREFERENCE_IGNORED, $token5),\n                ],\n            ],\n            [\n                '(+()NOT one)AND',\n                '(NOT one)',\n                [\n                    $token1 = new GroupBeginToken('(', 0, '(', null),\n                    $token2 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 1),\n                    $token3 = new GroupBeginToken('(', 2, '(', null),\n                    $token4 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 3),\n                    $token5 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 4),\n                    $token6 = new WordToken('one', 8, '', 'one'),\n                    
$token7 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 11),\n                    $token8 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 12),\n                ],\n                new Query(\n                    [\n                        new Group(\n                            [\n                                new LogicalNot(\n                                    new Term($token6),\n                                    $token5\n                                ),\n                            ],\n                            $token1,\n                            $token7\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token2, $token3, $token4),\n                    new Correction(Parser::CORRECTION_BINARY_OPERATOR_MISSING_RIGHT_OPERAND_IGNORED, $token8),\n                ],\n            ],\n            [\n                'one !NOT two',\n                'one NOT two',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 5),\n                    $token4 = new WordToken('two', 9, '', 'two'),\n                ],\n                new Query(\n                    [\n                        new Term($token1),\n                        new LogicalNot(\n                            new Term($token4),\n                            $token3\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token2),\n                ],\n            ],\n            [\n                'NOT NOT +one',\n                '+one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = 
new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 8),\n                    $token4 = new WordToken('one', 9, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Mandatory(\n                            new Term($token4),\n                            $token3\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_LOGICAL_NOT_OPERATORS_PRECEDING_PREFERENCE_IGNORED, $token1, $token2),\n                ],\n            ],\n            [\n                'NOT !+one',\n                '+one',\n                [\n                    $token1 = new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 4),\n                    $token3 = new Token(Tokenizer::TOKEN_MANDATORY, '+', 5),\n                    $token4 = new WordToken('one', 6, '', 'one'),\n                ],\n                new Query(\n                    [\n                        new Mandatory(\n                            new Term($token4),\n                            $token3\n                        ),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_ADJACENT_UNARY_OPERATOR_PRECEDING_OPERATOR_IGNORED, $token2),\n                    new Correction(Parser::CORRECTION_LOGICAL_NOT_OPERATORS_PRECEDING_PREFERENCE_IGNORED, $token1),\n                ],\n            ],\n            [\n                'one OR two AND () three',\n                'one OR two three',\n                [\n                    $token1 = new WordToken('one', 0, '', 'one'),\n                    $token2 = new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 4),\n                    $token3 = new WordToken('two', 7, '', 'two'),\n                    $token4 = new Token(Tokenizer::TOKEN_LOGICAL_AND, 
'AND', 11),\n                    $token5 = new GroupBeginToken('(', 15, '(', null),\n                    $token6 = new Token(Tokenizer::TOKEN_GROUP_END, ')', 16),\n                    $token7 = new WordToken('three', 18, '', 'three'),\n                ],\n                new Query(\n                    [\n                        new LogicalOr(\n                            new Term($token1),\n                            new Term($token3),\n                            $token2\n                        ),\n                        new Term($token7),\n                    ]\n                ),\n                [\n                    new Correction(Parser::CORRECTION_EMPTY_GROUP_IGNORED, $token4, $token5, $token6),\n                ],\n            ],\n        ];\n    }\n\n    /**\n     * @dataProvider providerForTestQuery\n     *\n     * @param string $string\n     * @param \\QueryTranslator\\Values\\Token[] $expectedTokens\n     * @param \\QueryTranslator\\Languages\\Galach\\Values\\Node\\Query $expectedTree\n     */\n    public function testQuery($string, $expectedTokens, $expectedTree)\n    {\n        $this->doTestQuery($string, $string, $expectedTokens, $expectedTree, []);\n    }\n\n    /**\n     * @dataProvider providerForTestQueryCorrected\n     *\n     * @param string $string\n     * @param string $correctedString\n     * @param \\QueryTranslator\\Values\\Token[] $expectedTokens\n     * @param \\QueryTranslator\\Languages\\Galach\\Values\\Node\\Query $query\n     * @param \\QueryTranslator\\Values\\Correction[] $corrections\n     */\n    public function testQueryCorrected($string, $correctedString, $expectedTokens, $query, $corrections)\n    {\n        $this->doTestQuery($string, $correctedString, $expectedTokens, $query, $corrections);\n    }\n\n    /**\n     * @param string $string\n     * @param string $expectedCorrectedString\n     * @param \\QueryTranslator\\Values\\Token[] $expectedTokens\n     * @param \\QueryTranslator\\Languages\\Galach\\Values\\Node\\Query 
$query\n     * @param \\QueryTranslator\\Values\\Correction[] $corrections\n     */\n    protected function doTestQuery($string, $expectedCorrectedString, $expectedTokens, $query, $corrections)\n    {\n        $tokenExtractor = new TokenExtractor\\Full();\n        $tokenizer = new Tokenizer($tokenExtractor);\n        $parser = new Parser();\n        $generator = $this->getNativeGenerator();\n\n        $tokenSequence = $tokenizer->tokenize($string);\n        $this->assertInstanceOf(TokenSequence::class, $tokenSequence);\n\n        $syntaxTree = $parser->parse($tokenSequence);\n        $this->assertInstanceOf(SyntaxTree::class, $syntaxTree);\n\n        $correctedString = $generator->generate($syntaxTree);\n\n        $tokensWithoutWhitespace = [];\n        foreach ($tokenSequence->tokens as $token) {\n            if ($token->type !== Tokenizer::TOKEN_WHITESPACE) {\n                $tokensWithoutWhitespace[] = $token;\n            }\n        }\n\n        $this->assertEquals($expectedCorrectedString, $correctedString);\n        $this->assertEquals($expectedTokens, $tokensWithoutWhitespace);\n        $this->assertEquals($query, $syntaxTree->rootNode);\n        $this->assertEquals($corrections, $syntaxTree->corrections);\n        $this->assertEquals($tokenSequence, $syntaxTree->tokenSequence);\n    }\n\n    /**\n     * @return \\QueryTranslator\\Languages\\Galach\\Generators\\Native\n     */\n    protected function getNativeGenerator()\n    {\n        $visitors = [];\n\n        $visitors[] = new Generators\\Native\\Group();\n        $visitors[] = new Generators\\Native\\BinaryOperator();\n        $visitors[] = new Generators\\Native\\Phrase();\n        $visitors[] = new Generators\\Native\\Query();\n        $visitors[] = new Generators\\Native\\Tag();\n        $visitors[] = new Generators\\Native\\UnaryOperator();\n        $visitors[] = new Generators\\Native\\User();\n        $visitors[] = new Generators\\Native\\Word();\n\n        $aggregate = new 
Generators\\Common\\Aggregate($visitors);\n\n        return new Generators\\Native($aggregate);\n    }\n}\n"
  },
  {
    "path": "tests/Galach/Tokenizer/FullTokenizerTest.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Tests\\Galach\\Tokenizer;\n\nuse PHPUnit\\Framework\\TestCase;\nuse QueryTranslator\\Languages\\Galach\\TokenExtractor;\nuse QueryTranslator\\Languages\\Galach\\Tokenizer;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\GroupBegin as GroupBeginToken;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\GroupBegin;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Phrase as PhraseToken;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Tag as TagToken;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\User as UserToken;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Word as WordToken;\nuse QueryTranslator\\Values\\Token;\nuse QueryTranslator\\Values\\TokenSequence;\n\n/**\n * Test case for tokenizer using Full token extractor.\n */\nclass FullTokenizerTest extends TestCase\n{\n    public function providerForTestTokenize()\n    {\n        return [\n            [\n                \" \\n\",\n                [\n                    new Token(Tokenizer::TOKEN_WHITESPACE, \" \\n\", 0),\n                ],\n            ],\n            [\n                'word',\n                [\n                    new WordToken('word', 0, '', 'word'),\n                ],\n            ],\n            [\n                \"word\\n\",\n                [\n                    new WordToken('word', 0, '', 'word'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, \"\\n\", 4),\n                ],\n            ],\n            [\n                'word ',\n                [\n                    new WordToken('word', 0, '', 'word'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 4),\n                ],\n            ],\n            [\n                'word(',\n                [\n                    new WordToken('word', 0, '', 'word'),\n                    new GroupBeginToken('(', 4, '(', null),\n                ],\n            ],\n            [\n                'word)',\n             
   [\n                    new WordToken('word', 0, '', 'word'),\n                    new Token(Tokenizer::TOKEN_GROUP_END, ')', 4),\n                ],\n            ],\n            [\n                'šđčćž',\n                [\n                    new WordToken('šđčćž', 0, '', 'šđčćž'),\n                ],\n            ],\n            [\n                $jajeNaOko = mb_convert_encoding('&#x1F373;', 'UTF-8', 'HTML-ENTITIES'),\n                [\n                    new WordToken($jajeNaOko, 0, '', $jajeNaOko),\n                ],\n            ],\n            [\n                $blah = mb_convert_encoding(\n                    '&#x1F469;&#x200D;&#x1F469;&#x200D;&#x1F467;&#x200D;&#x1F467;',\n                    'UTF-8',\n                    'HTML-ENTITIES'\n                ),\n                [\n                    new WordToken($blah, 0, '', $blah),\n                ],\n            ],\n            [\n                'word-word',\n                [\n                    new WordToken('word-word', 0, '', 'word-word'),\n                ],\n            ],\n            [\n                \"word\\nword\",\n                [\n                    new WordToken('word', 0, '', 'word'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, \"\\n\", 4),\n                    new WordToken('word', 5, '', 'word'),\n                ],\n            ],\n            [\n                'word word',\n                [\n                    new WordToken('word', 0, '', 'word'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 4),\n                    new WordToken('word', 5, '', 'word'),\n                ],\n            ],\n            [\n                'word\\\\ word',\n                [\n                    new WordToken('word\\\\ word', 0, '', 'word word'),\n                ],\n            ],\n            [\n                '\"phrase\"',\n                [\n                    new PhraseToken('\"phrase\"', 0, '', '\"', 'phrase'),\n                ],\n            
],\n            [\n                '\"phrase\" \"phrase\"',\n                [\n                    new PhraseToken('\"phrase\"', 0, '', '\"', 'phrase'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 8),\n                    new PhraseToken('\"phrase\"', 9, '', '\"', 'phrase'),\n                ],\n            ],\n            [\n                \"\\\"phrase\\nphrase\\\"\",\n                [\n                    new PhraseToken(\"\\\"phrase\\nphrase\\\"\", 0, '', '\"', \"phrase\\nphrase\"),\n                ],\n            ],\n            [\n                \"'phrase'\",\n                [\n                    new WordToken(\"'phrase'\", 0, '', \"'phrase'\"),\n                ],\n            ],\n            [\n                \"'phrase' 'phrase'\",\n                [\n                    new WordToken(\"'phrase'\", 0, '', \"'phrase'\"),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 8),\n                    new WordToken(\"'phrase'\", 9, '', \"'phrase'\"),\n                ],\n            ],\n            [\n                \"'phrase\\nphrase'\",\n                [\n                    new WordToken(\"'phrase\", 0, '', \"'phrase\"),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, \"\\n\", 7),\n                    new WordToken(\"phrase'\", 8, '', \"phrase'\"),\n                ],\n            ],\n            [\n                '\"phrase\\\"phrase\"',\n                [\n                    new PhraseToken('\"phrase\\\"phrase\"', 0, '', '\"', 'phrase\"phrase'),\n                ],\n            ],\n            [\n                \"'phrase\\\\'phrase'\",\n                [\n                    new WordToken(\"'phrase\\\\'phrase'\", 0, '', \"'phrase\\\\'phrase'\"),\n                ],\n            ],\n            [\n                '\"phrase\\'phrase\"',\n                [\n                    new PhraseToken('\"phrase\\'phrase\"', 0, '', '\"', 'phrase\\'phrase'),\n                ],\n            ],\n            [\n       
         \"'phrase\\\"phrase'\",\n                [\n                    new WordToken(\"'phrase\", 0, '', \"'phrase\"),\n                    new Token(Tokenizer::TOKEN_BAILOUT, '\"', 7),\n                    new WordToken(\"phrase'\", 8, '', \"phrase'\"),\n                ],\n            ],\n            [\n                '\\\"not_phrase\\\"',\n                [\n                    new WordToken('\\\"not_phrase\\\"', 0, '', '\"not_phrase\"'),\n                ],\n            ],\n            [\n                \"\\\\'not_phrase\\\\'\",\n                [\n                    new WordToken(\"\\\\'not_phrase\\\\'\", 0, '', \"\\\\'not_phrase\\\\'\"),\n                ],\n            ],\n            [\n                '\"phrase + - ! ( ) AND OR NOT \\\\ phrase\"',\n                [\n                    new PhraseToken(\n                        '\"phrase + - ! ( ) AND OR NOT \\\\ phrase\"',\n                        0,\n                        '',\n                        '\"',\n                        'phrase + - ! ( ) AND OR NOT \\\\ phrase'\n                    ),\n                ],\n            ],\n            [\n                \"'word + - ! 
( ) AND OR NOT \\\\ word'\",\n                [\n                    new WordToken(\"'word\", 0, '', \"'word\"),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 5),\n                    new Token(Tokenizer::TOKEN_MANDATORY, '+', 6),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 7),\n                    new Token(Tokenizer::TOKEN_PROHIBITED, '-', 8),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 9),\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 10),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 11),\n                    new GroupBegin('(', 12, '(', ''),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 13),\n                    new Token(Tokenizer::TOKEN_GROUP_END, ')', 14),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 15),\n                    new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 16),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 19),\n                    new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 20),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 22),\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 23),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 26),\n                    new WordToken(\"\\\\ word'\", 27, '', \" word'\"),\n                ],\n            ],\n            [\n                '\"phrase \\+ \\- \\! \\( \\) \\AND \\OR \\NOT \\\\\\\\ phrase\"',\n                [\n                    new PhraseToken(\n                        '\"phrase \\+ \\- \\! \\( \\) \\AND \\OR \\NOT \\\\\\\\ phrase\"',\n                        0,\n                        '',\n                        '\"',\n                        'phrase \\+ \\- \\! \\( \\) \\AND \\OR \\NOT \\\\\\\\ phrase'\n                    ),\n                ],\n            ],\n            [\n                \"'word \\\\+ \\\\- \\\\! 
\\\\( \\\\) \\\\AND \\\\OR \\\\NOT \\\\\\\\ word'\",\n                [\n                    new WordToken(\"'word\", 0, '', \"'word\"),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 5),\n                    new WordToken(\"\\\\+\", 6, '', '+'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 8),\n                    new WordToken(\"\\\\-\", 9, '', '-'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 11),\n                    new WordToken(\"\\\\!\", 12, '', '!'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 14),\n                    new WordToken(\"\\\\(\", 15, '', '('),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 17),\n                    new WordToken(\"\\\\)\", 18, '', ')'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 20),\n                    new WordToken(\"\\\\AND\", 21, '', '\\AND'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 25),\n                    new WordToken(\"\\\\OR\", 26, '', '\\OR'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 29),\n                    new WordToken(\"\\\\NOT\", 30, '', '\\NOT'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 34),\n                    new WordToken(\"\\\\\\\\\", 35, '', '\\\\'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 37),\n                    new WordToken(\"word'\", 38, '', \"word'\"),\n                ],\n            ],\n            [\n                '#tag',\n                [\n                    new TagToken('#tag', 0, '#', 'tag'),\n                ],\n            ],\n            [\n                '\\#tag',\n                [\n                    new WordToken('\\#tag', 0, '', '#tag'),\n                ],\n            ],\n            [\n                '#tagšđčćž',\n                [\n                    new WordToken('#tagšđčćž', 0, '', '#tagšđčćž'),\n                ],\n            ],\n            [\n       
         '#_tag-tag',\n                [\n                    new TagToken('#_tag-tag', 0, '#', '_tag-tag'),\n                ],\n            ],\n            [\n                '#-not-tag',\n                [\n                    new WordToken('#-not-tag', 0, '', '#-not-tag'),\n                ],\n            ],\n            [\n                '#tag+',\n                [\n                    new TagToken('#tag', 0, '#', 'tag'),\n                    new Token(Tokenizer::TOKEN_MANDATORY, '+', 4),\n                ],\n            ],\n            [\n                '#tag-',\n                [\n                    new TagToken('#tag-', 0, '#', 'tag-'),\n                ],\n            ],\n            [\n                '#tag!',\n                [\n                    new TagToken('#tag', 0, '#', 'tag'),\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 4),\n                ],\n            ],\n            [\n                \"#tag\\n\",\n                [\n                    new TagToken('#tag', 0, '#', 'tag'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, \"\\n\", 4),\n                ],\n            ],\n            [\n                '#tag ',\n                [\n                    new TagToken('#tag', 0, '#', 'tag'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 4),\n                ],\n            ],\n            [\n                '#tag(',\n                [\n                    new TagToken('#tag', 0, '#', 'tag'),\n                    new GroupBeginToken('(', 4, '(', null),\n                ],\n            ],\n            [\n                '#tag)',\n                [\n                    new TagToken('#tag', 0, '#', 'tag'),\n                    new Token(Tokenizer::TOKEN_GROUP_END, ')', 4),\n                ],\n            ],\n            [\n                '@user',\n                [\n                    new UserToken('@user', 0, '@', 'user'),\n                ],\n            ],\n            [\n                
'@user.user',\n                [\n                    new UserToken('@user.user', 0, '@', 'user.user'),\n                ],\n            ],\n            [\n                '\\@user',\n                [\n                    new WordToken('\\@user', 0, '', '@user'),\n                ],\n            ],\n            [\n                '@useršđčćž',\n                [\n                    new WordToken('@useršđčćž', 0, '', '@useršđčćž'),\n                ],\n            ],\n            [\n                '@_user-user',\n                [\n                    new UserToken('@_user-user', 0, '@', '_user-user'),\n                ],\n            ],\n            [\n                '@-not-user',\n                [\n                    new WordToken('@-not-user', 0, '', '@-not-user'),\n                ],\n            ],\n            [\n                '@user+',\n                [\n                    new UserToken('@user', 0, '@', 'user'),\n                    new Token(Tokenizer::TOKEN_MANDATORY, '+', 5),\n                ],\n            ],\n            [\n                '@user-',\n                [\n                    new UserToken('@user-', 0, '@', 'user-'),\n                ],\n            ],\n            [\n                '@user!',\n                [\n                    new UserToken('@user', 0, '@', 'user'),\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 5),\n                ],\n            ],\n            [\n                \"@user\\n\",\n                [\n                    new UserToken('@user', 0, '@', 'user'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, \"\\n\", 5),\n                ],\n            ],\n            [\n                '@user ',\n                [\n                    new UserToken('@user', 0, '@', 'user'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 5),\n                ],\n            ],\n            [\n                '@user(',\n                [\n                    new 
UserToken('@user', 0, '@', 'user'),\n                    new GroupBeginToken('(', 5, '(', null),\n                ],\n            ],\n            [\n                '@user)',\n                [\n                    new UserToken('@user', 0, '@', 'user'),\n                    new Token(Tokenizer::TOKEN_GROUP_END, ')', 5),\n                ],\n            ],\n            [\n                'domain:',\n                [\n                    new WordToken('domain:', 0, '', 'domain:'),\n                ],\n            ],\n            [\n                'some.domain:',\n                [\n                    new WordToken('some.domain:', 0, '', 'some.domain:'),\n                ],\n            ],\n            [\n                'domain:domain:',\n                [\n                    new WordToken('domain:domain:', 0, 'domain', 'domain:'),\n                ],\n            ],\n            [\n                'some.domain:some.domain:',\n                [\n                    new WordToken('some.domain:some.domain:', 0, 'some.domain', 'some.domain:'),\n                ],\n            ],\n            [\n                'domain:domain:domain:domain',\n                [\n                    new WordToken('domain:domain:domain:domain', 0, 'domain', 'domain:domain:domain'),\n                ],\n            ],\n            [\n                'domain\\:',\n                [\n                    new WordToken('domain\\:', 0, '', 'domain:'),\n                ],\n            ],\n            [\n                'domain\\::',\n                [\n                    new WordToken('domain\\::', 0, '', 'domain::'),\n                ],\n            ],\n            [\n                'domain:word',\n                [\n                    new WordToken('domain:word', 0, 'domain', 'word'),\n                ],\n            ],\n            [\n                'domain\\:word',\n                [\n                    new WordToken('domain\\:word', 0, '', 'domain:word'),\n                ],\n       
     ],\n            [\n                'domain:\"phrase\"',\n                [\n                    new PhraseToken('domain:\"phrase\"', 0, 'domain', '\"', 'phrase'),\n                ],\n            ],\n            [\n                'some.domain:\"phrase\"',\n                [\n                    new PhraseToken('some.domain:\"phrase\"', 0, 'some.domain', '\"', 'phrase'),\n                ],\n            ],\n            [\n                'domain\\:\"phrase\"',\n                [\n                    new WordToken('domain\\:', 0, '', 'domain:'),\n                    new PhraseToken('\"phrase\"', 8, '', '\"', 'phrase'),\n                ],\n            ],\n            [\n                'domain:(one)',\n                [\n                    new GroupBeginToken('domain:(', 0, '(', 'domain'),\n                    new WordToken('one', 8, '', 'one'),\n                    new Token(Tokenizer::TOKEN_GROUP_END, ')', 11),\n                ],\n            ],\n            [\n                'some.domain:(one)',\n                [\n                    new GroupBeginToken('some.domain:(', 0, '(', 'some.domain'),\n                    new WordToken('one', 13, '', 'one'),\n                    new Token(Tokenizer::TOKEN_GROUP_END, ')', 16),\n                ],\n            ],\n            [\n                'one AND two',\n                [\n                    new WordToken('one', 0, '', 'one'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 3),\n                    new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 4),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 7),\n                    new WordToken('two', 8, '', 'two'),\n                ],\n            ],\n            [\n                'one && two',\n                [\n                    new WordToken('one', 0, '', 'one'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 3),\n                    new Token(Tokenizer::TOKEN_LOGICAL_AND, '&&', 4),\n                    new 
Token(Tokenizer::TOKEN_WHITESPACE, ' ', 6),\n                    new WordToken('two', 7, '', 'two'),\n                ],\n            ],\n            [\n                'one OR two',\n                [\n                    new WordToken('one', 0, '', 'one'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 3),\n                    new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 4),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 6),\n                    new WordToken('two', 7, '', 'two'),\n                ],\n            ],\n            [\n                'one || two',\n                [\n                    new WordToken('one', 0, '', 'one'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 3),\n                    new Token(Tokenizer::TOKEN_LOGICAL_OR, '||', 4),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 6),\n                    new WordToken('two', 7, '', 'two'),\n                ],\n            ],\n            [\n                'one NOT two',\n                [\n                    new WordToken('one', 0, '', 'one'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 3),\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 4),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 7),\n                    new WordToken('two', 8, '', 'two'),\n                ],\n            ],\n            [\n                'AND',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 0),\n                ],\n            ],\n            [\n                'ANDword',\n                [\n                    new WordToken('ANDword', 0, '', 'ANDword'),\n                ],\n            ],\n            [\n                'wordAND',\n                [\n                    new WordToken('wordAND', 0, '', 'wordAND'),\n                ],\n            ],\n            [\n                'AND+',\n                [\n                    new 
Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 0),\n                    new Token(Tokenizer::TOKEN_MANDATORY, '+', 3),\n                ],\n            ],\n            [\n                'AND\\+',\n                [\n                    new WordToken('AND\\+', 0, '', 'AND+'),\n                ],\n            ],\n            [\n                '+AND',\n                [\n                    new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 1),\n                ],\n            ],\n            [\n                'AND-',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 0),\n                    new Token(Tokenizer::TOKEN_PROHIBITED, '-', 3),\n                ],\n            ],\n            [\n                'AND\\-',\n                [\n                    new WordToken('AND\\-', 0, '', 'AND-'),\n                ],\n            ],\n            [\n                '-AND',\n                [\n                    new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 1),\n                ],\n            ],\n            [\n                'AND!',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 0),\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 3),\n                ],\n            ],\n            [\n                'AND\\!',\n                [\n                    new WordToken('AND\\!', 0, '', 'AND!'),\n                ],\n            ],\n            [\n                '!AND',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 0),\n                    new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 1),\n                ],\n            ],\n            [\n                \"AND\\n\",\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 0),\n                    new 
Token(Tokenizer::TOKEN_WHITESPACE, \"\\n\", 3),\n                ],\n            ],\n            [\n                'AND ',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 0),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 3),\n                ],\n            ],\n            [\n                'AND(',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 0),\n                    new GroupBeginToken('(', 3, '(', null),\n                ],\n            ],\n            [\n                'AND)',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 0),\n                    new Token(Tokenizer::TOKEN_GROUP_END, ')', 3),\n                ],\n            ],\n            [\n                'ORword',\n                [\n                    new WordToken('ORword', 0, '', 'ORword'),\n                ],\n            ],\n            [\n                'wordOR',\n                [\n                    new WordToken('wordOR', 0, '', 'wordOR'),\n                ],\n            ],\n            [\n                'OR',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 0),\n                ],\n            ],\n            [\n                'OR+',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 0),\n                    new Token(Tokenizer::TOKEN_MANDATORY, '+', 2),\n                ],\n            ],\n            [\n                'OR\\+',\n                [\n                    new WordToken('OR\\+', 0, '', 'OR+'),\n                ],\n            ],\n            [\n                '+OR',\n                [\n                    new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 1),\n                ],\n            ],\n            [\n                'OR-',\n                [\n                    new 
Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 0),\n                    new Token(Tokenizer::TOKEN_PROHIBITED, '-', 2),\n                ],\n            ],\n            [\n                'OR\\-',\n                [\n                    new WordToken('OR\\-', 0, '', 'OR-'),\n                ],\n            ],\n            [\n                '-OR',\n                [\n                    new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 1),\n                ],\n            ],\n            [\n                'OR!',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 0),\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 2),\n                ],\n            ],\n            [\n                'OR\\!',\n                [\n                    new WordToken('OR\\!', 0, '', 'OR!'),\n                ],\n            ],\n            [\n                '!OR',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 0),\n                    new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 1),\n                ],\n            ],\n            [\n                \"OR\\n\",\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 0),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, \"\\n\", 2),\n                ],\n            ],\n            [\n                'OR ',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 0),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 2),\n                ],\n            ],\n            [\n                'OR(',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 0),\n                    new GroupBeginToken('(', 2, '(', null),\n                ],\n            ],\n            [\n                'OR)',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 0),\n       
             new Token(Tokenizer::TOKEN_GROUP_END, ')', 2),\n                ],\n            ],\n            [\n                'NOT',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                ],\n            ],\n            [\n                'NOTword',\n                [\n                    new WordToken('NOTword', 0, '', 'NOTword'),\n                ],\n            ],\n            [\n                'wordNOT',\n                [\n                    new WordToken('wordNOT', 0, '', 'wordNOT'),\n                ],\n            ],\n            [\n                'NOT+',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    new Token(Tokenizer::TOKEN_MANDATORY, '+', 3),\n                ],\n            ],\n            [\n                '+NOT',\n                [\n                    new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 1),\n                ],\n            ],\n            [\n                'NOT-',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    new Token(Tokenizer::TOKEN_PROHIBITED, '-', 3),\n                ],\n            ],\n            [\n                '-NOT',\n                [\n                    new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 1),\n                ],\n            ],\n            [\n                'NOT!',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 3),\n                ],\n            ],\n            [\n                '!NOT',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 0),\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 1),\n                ],\n       
     ],\n            [\n                \"NOT\\n\",\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, \"\\n\", 3),\n                ],\n            ],\n            [\n                'NOT ',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 3),\n                ],\n            ],\n            [\n                'NOT(',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    new GroupBeginToken('(', 3, '(', null),\n                ],\n            ],\n            [\n                'NOT)',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    new Token(Tokenizer::TOKEN_GROUP_END, ')', 3),\n                ],\n            ],\n            [\n                '+',\n                [\n                    new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                ],\n            ],\n            [\n                '++',\n                [\n                    new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    new Token(Tokenizer::TOKEN_MANDATORY, '+', 1),\n                ],\n            ],\n            [\n                '-',\n                [\n                    new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                ],\n            ],\n            [\n                '--',\n                [\n                    new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    new Token(Tokenizer::TOKEN_PROHIBITED, '-', 1),\n                ],\n            ],\n            [\n                '!',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 0),\n                ],\n            ],\n            [\n                '!!',\n                [\n                    new 
Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 0),\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 1),\n                ],\n            ],\n            [\n                '+word',\n                [\n                    new Token(Tokenizer::TOKEN_MANDATORY, '+', 0),\n                    new WordToken('word', 1, '', 'word'),\n                ],\n            ],\n            [\n                '-word',\n                [\n                    new Token(Tokenizer::TOKEN_PROHIBITED, '-', 0),\n                    new WordToken('word', 1, '', 'word'),\n                ],\n            ],\n            [\n                '!word',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT_2, '!', 0),\n                    new WordToken('word', 1, '', 'word'),\n                ],\n            ],\n            [\n                '(word',\n                [\n                    new GroupBeginToken('(', 0, '(', null),\n                    new WordToken('word', 1, '', 'word'),\n                ],\n            ],\n            [\n                ')word',\n                [\n                    new Token(Tokenizer::TOKEN_GROUP_END, ')', 0),\n                    new WordToken('word', 1, '', 'word'),\n                ],\n            ],\n            [\n                'word+',\n                [\n                    new WordToken('word+', 0, '', 'word+'),\n                ],\n            ],\n            [\n                'word-',\n                [\n                    new WordToken('word-', 0, '', 'word-'),\n                ],\n            ],\n            [\n                'word!',\n                [\n                    new WordToken('word!', 0, '', 'word!'),\n                ],\n            ],\n            [\n                'word(',\n                [\n                    new WordToken('word', 0, '', 'word'),\n                    new GroupBeginToken('(', 4, '(', null),\n                ],\n            ],\n            [\n                'word)',\n   
             [\n                    new WordToken('word', 0, '', 'word'),\n                    new Token(Tokenizer::TOKEN_GROUP_END, ')', 4),\n                ],\n            ],\n            [\n                'one+two+',\n                [\n                    new WordToken('one+two+', 0, '', 'one+two+'),\n                ],\n            ],\n            [\n                'one-two-',\n                [\n                    new WordToken('one-two-', 0, '', 'one-two-'),\n                ],\n            ],\n            [\n                'one!two!',\n                [\n                    new WordToken('one!two!', 0, '', 'one!two!'),\n                ],\n            ],\n            [\n                'one(two(',\n                [\n                    new WordToken('one', 0, '', 'one'),\n                    new GroupBeginToken('(', 3, '(', null),\n                    new WordToken('two', 4, '', 'two'),\n                    new GroupBeginToken('(', 7, '(', null),\n                ],\n            ],\n            [\n                'one)two)',\n                [\n                    new WordToken('one', 0, '', 'one'),\n                    new Token(Tokenizer::TOKEN_GROUP_END, ')', 3),\n                    new WordToken('two', 4, '', 'two'),\n                    new Token(Tokenizer::TOKEN_GROUP_END, ')', 7),\n                ],\n            ],\n            [\n                'word\\+',\n                [\n                    new WordToken('word\\+', 0, '', 'word+'),\n                ],\n            ],\n            [\n                'word\\-',\n                [\n                    new WordToken('word\\-', 0, '', 'word-'),\n                ],\n            ],\n            [\n                'word\\!',\n                [\n                    new WordToken('word\\!', 0, '', 'word!'),\n                ],\n            ],\n            [\n                'word\\(',\n                [\n                    new WordToken('word\\(', 0, '', 'word('),\n                ],\n           
 ],\n            [\n                'word\\)',\n                [\n                    new WordToken('word\\)', 0, '', 'word)'),\n                ],\n            ],\n            [\n                '\\+word',\n                [\n                    new WordToken('\\+word', 0, '', '+word'),\n                ],\n            ],\n            [\n                '\\-word',\n                [\n                    new WordToken('\\-word', 0, '', '-word'),\n                ],\n            ],\n            [\n                '\\!word',\n                [\n                    new WordToken('\\!word', 0, '', '!word'),\n                ],\n            ],\n            [\n                '\\(word',\n                [\n                    new WordToken('\\(word', 0, '', '(word'),\n                ],\n            ],\n            [\n                '\\)word',\n                [\n                    new WordToken('\\)word', 0, '', ')word'),\n                ],\n            ],\n            [\n                'one\\+two\\+',\n                [\n                    new WordToken('one\\+two\\+', 0, '', 'one+two+'),\n                ],\n            ],\n            [\n                'one\\-two\\-',\n                [\n                    new WordToken('one\\-two\\-', 0, '', 'one-two-'),\n                ],\n            ],\n            [\n                'one\\!two\\!',\n                [\n                    new WordToken('one\\!two\\!', 0, '', 'one!two!'),\n                ],\n            ],\n            [\n                'one\\(two\\(',\n                [\n                    new WordToken('one\\(two\\(', 0, '', 'one(two('),\n                ],\n            ],\n            [\n                'one\\)two\\)',\n                [\n                    new WordToken('one\\)two\\)', 0, '', 'one)two)'),\n                ],\n            ],\n            [\n                'one\\\\\\\\\\)two\\\\\\\\\\(one\\\\\\\\\\+two\\\\\\\\\\-one\\\\\\\\\\!two',\n                [\n                    new 
WordToken(\n                        'one\\\\\\\\\\)two\\\\\\\\\\(one\\\\\\\\\\+two\\\\\\\\\\-one\\\\\\\\\\!two',\n                        0,\n                        '',\n                        'one\\)two\\(one\\+two\\-one\\!two'\n                    ),\n                ],\n            ],\n            [\n                'one\\\\\\\\)two\\\\\\\\(one\\\\\\\\+two\\\\\\\\-one\\\\\\\\!two',\n                [\n                    new WordToken(\n                        'one\\\\\\\\',\n                        0,\n                        '',\n                        'one\\\\'\n                    ),\n                    new Token(Tokenizer::TOKEN_GROUP_END, ')', 5),\n                    new WordToken(\n                        'two\\\\\\\\',\n                        6,\n                        '',\n                        'two\\\\'\n                    ),\n                    new GroupBeginToken('(', 11, '(', null),\n                    new WordToken(\n                        'one\\\\\\\\+two\\\\\\\\-one\\\\\\\\!two',\n                        12,\n                        '',\n                        'one\\+two\\-one\\!two'\n                    ),\n                ],\n            ],\n            [\n                'one+two-one!two',\n                [\n                    new WordToken(\n                        'one+two-one!two',\n                        0,\n                        '',\n                        'one+two-one!two'\n                    ),\n                ],\n            ],\n            [\n                'one\\\\\\'two',\n                [\n                    new WordToken('one\\\\\\'two', 0, '', \"one\\\\'two\"),\n                ],\n            ],\n            [\n                'one\\\\\"two',\n                [\n                    new WordToken('one\\\\\"two', 0, '', 'one\"two'),\n                ],\n            ],\n            [\n                '\\\\',\n                [\n                    new WordToken('\\\\', 0, '', '\\\\'),\n                ],\n  
          ],\n            [\n                'one\\\\two',\n                [\n                    new WordToken('one\\\\two', 0, '', 'one\\\\two'),\n                ],\n            ],\n            [\n                'one\\\\\\\\+\\\\-\\\\!\\\\(\\\\)two',\n                [\n                    new WordToken('one\\\\\\\\+\\\\-\\\\!\\\\(\\\\)two', 0, '', 'one\\\\+-!()two'),\n                ],\n            ],\n            [\n                '\\\\\\\\',\n                [\n                    new WordToken('\\\\\\\\', 0, '', '\\\\'),\n                ],\n            ],\n            [\n                '(type:)',\n                [\n                    new GroupBeginToken('(', 0, '(', null),\n                    new WordToken('type:', 1, '', 'type:'),\n                    new Token(Tokenizer::TOKEN_GROUP_END, ')', 6),\n                ],\n            ],\n            [\n                'type: AND',\n                [\n                    new WordToken('type:', 0, '', 'type:'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 5),\n                    new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 6),\n                ],\n            ],\n            [\n                \"word'\",\n                [\n                    new WordToken(\"word'\", 0, '', \"word'\"),\n                ],\n            ],\n            [\n                'one\\'two',\n                [\n                    new WordToken(\"one'two\", 0, '', \"one'two\"),\n                ],\n            ],\n            [\n                \"AND'\",\n                [\n                    new WordToken(\"AND'\", 0, '', \"AND'\"),\n                ],\n            ],\n            [\n                \"OR'\",\n                [\n                    new WordToken(\"OR'\", 0, '', \"OR'\"),\n                ],\n            ],\n            [\n                \"NOT'\",\n                [\n                    new WordToken(\"NOT'\", 0, '', \"NOT'\"),\n                ],\n            ],\n        ];\n    }\n\n    
/**\n     * @dataProvider providerForTestTokenize\n     *\n     * @param string $string\n     * @param \\QueryTranslator\\Values\\Token[] $expectedTokens\n     */\n    public function testTokenize($string, array $expectedTokens)\n    {\n        $tokenExtractor = $this->getTokenExtractor();\n        $tokenizer = new Tokenizer($tokenExtractor);\n\n        $tokenSequence = $tokenizer->tokenize($string);\n\n        $this->assertInstanceOf(TokenSequence::class, $tokenSequence);\n        $this->assertEquals($expectedTokens, $tokenSequence->tokens);\n        $this->assertEquals($string, $tokenSequence->source);\n    }\n\n    public function providerForTestTokenizeNotRecognized()\n    {\n        return [\n            [\n                (\n                    $blah = mb_convert_encoding(\n                        '&#x1F469;&#x200D;&#x1F469;&#x200D;&#x1F467;&#x200D;&#x1F467;',\n                        'UTF-8',\n                        'HTML-ENTITIES'\n                    )\n                ) . '\"',\n                [\n                    new WordToken($blah, 0, '', $blah),\n                    new Token(Tokenizer::TOKEN_BAILOUT, '\"', 7),\n                ],\n            ],\n            [\n                '\"' . 
$blah,\n                [\n                    new Token(Tokenizer::TOKEN_BAILOUT, '\"', 0),\n                    new WordToken($blah, 1, '', $blah),\n                ],\n            ],\n            [\n                'word\"',\n                [\n                    new WordToken('word', 0, '', 'word'),\n                    new Token(Tokenizer::TOKEN_BAILOUT, '\"', 4),\n                ],\n            ],\n            [\n                'one\"two',\n                [\n                    new WordToken('one', 0, '', 'one'),\n                    new Token(Tokenizer::TOKEN_BAILOUT, '\"', 3),\n                    new WordToken('two', 4, '', 'two'),\n                ],\n            ],\n            [\n                'šđ\"čćž',\n                [\n                    new WordToken('šđ', 0, '', 'šđ'),\n                    new Token(Tokenizer::TOKEN_BAILOUT, '\"', 2),\n                    new WordToken('čćž', 3, '', 'čćž'),\n                ],\n            ],\n            [\n                'AND\"',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_AND, 'AND', 0),\n                    new Token(Tokenizer::TOKEN_BAILOUT, '\"', 3),\n                ],\n            ],\n            [\n                'OR\"',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_OR, 'OR', 0),\n                    new Token(Tokenizer::TOKEN_BAILOUT, '\"', 2),\n                ],\n            ],\n            [\n                'NOT\"',\n                [\n                    new Token(Tokenizer::TOKEN_LOGICAL_NOT, 'NOT', 0),\n                    new Token(Tokenizer::TOKEN_BAILOUT, '\"', 3),\n                ],\n            ],\n        ];\n    }\n\n    /**\n     * @dataProvider providerForTestTokenizeNotRecognized\n     *\n     * @param string $string\n     * @param \\QueryTranslator\\Values\\Token[] $expectedTokens\n     */\n    public function testTokenizeNotRecognized($string, array $expectedTokens)\n    {\n        $tokenExtractor = 
$this->getTokenExtractor();\n        $tokenizer = new Tokenizer($tokenExtractor);\n\n        $tokenSequence = $tokenizer->tokenize($string);\n\n        $this->assertInstanceOf(TokenSequence::class, $tokenSequence);\n        $this->assertEquals($expectedTokens, $tokenSequence->tokens);\n        $this->assertEquals($string, $tokenSequence->source);\n    }\n\n    /**\n     * @return \\QueryTranslator\\Languages\\Galach\\TokenExtractor\n     */\n    protected function getTokenExtractor()\n    {\n        return new TokenExtractor\\Full();\n    }\n}\n"
  },
  {
    "path": "tests/Galach/Tokenizer/TextTokenizerTest.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Tests\\Galach\\Tokenizer;\n\nuse QueryTranslator\\Languages\\Galach\\TokenExtractor;\nuse QueryTranslator\\Languages\\Galach\\Tokenizer;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\GroupBegin as GroupBeginToken;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Phrase as PhraseToken;\nuse QueryTranslator\\Languages\\Galach\\Values\\Token\\Word as WordToken;\nuse QueryTranslator\\Values\\Token;\n\n/**\n * Test case for tokenizer using Text token extractor.\n *\n * This inherits from FullTokenizerTest and overrides fixtures that behave differently.\n */\nclass TextTokenizerTest extends FullTokenizerTest\n{\n    /**\n     * @var array\n     */\n    protected static $fixtureOverride;\n\n    /**\n     * @dataProvider providerForTestTokenize\n     *\n     * @param string $string\n     * @param array $expectedTokens\n     */\n    public function testTokenize($string, array $expectedTokens)\n    {\n        $expectedTokens = $this->getExpectedFixtureWithOverride($string, $expectedTokens);\n        parent::testTokenize($string, $expectedTokens);\n    }\n\n    /**\n     * @param string $string\n     * @param array $expectedTokens\n     *\n     * @return \\QueryTranslator\\Values\\Token[]\n     */\n    protected function getExpectedFixtureWithOverride($string, array $expectedTokens)\n    {\n        $this->setFixtureOverride();\n\n        if (isset(self::$fixtureOverride[$string])) {\n            return self::$fixtureOverride[$string];\n        }\n\n        return $expectedTokens;\n    }\n\n    protected function setFixtureOverride()\n    {\n        if (self::$fixtureOverride === null) {\n            self::$fixtureOverride = [\n                '#tag' => [\n                    new WordToken('#tag', 0, '', '#tag'),\n                ],\n                '\\#tag' => [\n                    new WordToken('\\#tag', 0, '', '\\#tag'),\n                ],\n                '#_tag-tag' => [\n                    new 
WordToken('#_tag-tag', 0, '', '#_tag-tag'),\n                ],\n                '#tag+' => [\n                    new WordToken('#tag+', 0, '', '#tag+'),\n                ],\n                '#tag-' => [\n                    new WordToken('#tag-', 0, '', '#tag-'),\n                ],\n                '#tag!' => [\n                    new WordToken('#tag!', 0, '', '#tag!'),\n                ],\n                \"#tag\\n\" => [\n                    new WordToken('#tag', 0, '', '#tag'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, \"\\n\", 4),\n                ],\n                '#tag ' => [\n                    new WordToken('#tag', 0, '', '#tag'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 4),\n                ],\n                '#tag(' => [\n                    new WordToken('#tag', 0, '', '#tag'),\n                    new GroupBeginToken('(', 4, '(', null),\n                ],\n                '#tag)' => [\n                    new WordToken('#tag', 0, '', '#tag'),\n                    new Token(Tokenizer::TOKEN_GROUP_END, ')', 4),\n                ],\n                '@user' => [\n                    new WordToken('@user', 0, '', '@user'),\n                ],\n                '@user.user' => [\n                    new WordToken('@user.user', 0, '', '@user.user'),\n                ],\n                '\\@user' => [\n                    new WordToken('\\@user', 0, '', '\\@user'),\n                ],\n                '@_user-user' => [\n                    new WordToken('@_user-user', 0, '', '@_user-user'),\n                ],\n                '@user+' => [\n                    new WordToken('@user+', 0, '', '@user+'),\n                ],\n                '@user-' => [\n                    new WordToken('@user-', 0, '', '@user-'),\n                ],\n                '@user!' 
=> [\n                    new WordToken('@user!', 0, '', '@user!'),\n                ],\n                \"@user\\n\" => [\n                    new WordToken('@user', 0, '', '@user'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, \"\\n\", 5),\n                ],\n                '@user ' => [\n                    new WordToken('@user', 0, '', '@user'),\n                    new Token(Tokenizer::TOKEN_WHITESPACE, ' ', 5),\n                ],\n                '@user(' => [\n                    new WordToken('@user', 0, '', '@user'),\n                    new GroupBeginToken('(', 5, '(', null),\n                ],\n                '@user)' => [\n                    new WordToken('@user', 0, '', '@user'),\n                    new Token(Tokenizer::TOKEN_GROUP_END, ')', 5),\n                ],\n                'domain:domain:' => [\n                    new WordToken('domain:domain:', 0, '', 'domain:domain:'),\n                ],\n                'some.domain:some.domain:' => [\n                    new WordToken('some.domain:some.domain:', 0, '', 'some.domain:some.domain:'),\n                ],\n                'domain:domain:domain:domain' => [\n                    new WordToken('domain:domain:domain:domain', 0, '', 'domain:domain:domain:domain'),\n                ],\n                'domain\\:' => [\n                    new WordToken('domain\\:', 0, '', 'domain\\:'),\n                ],\n                'domain\\::' => [\n                    new WordToken('domain\\::', 0, '', 'domain\\::'),\n                ],\n                'domain:word' => [\n                    new WordToken('domain:word', 0, '', 'domain:word'),\n                ],\n                'domain\\:word' => [\n                    new WordToken('domain\\:word', 0, '', 'domain\\:word'),\n                ],\n                'domain:\"phrase\"' => [\n                    new WordToken('domain:', 0, '', 'domain:'),\n                    new PhraseToken('\"phrase\"', 7, '', '\"', 'phrase'),\n          
      ],\n                'some.domain:\"phrase\"' => [\n                    new WordToken('some.domain:', 0, '', 'some.domain:'),\n                    new PhraseToken('\"phrase\"', 12, '', '\"', 'phrase'),\n                ],\n                'domain\\:\"phrase\"' => [\n                    new WordToken('domain\\:', 0, '', 'domain\\:'),\n                    new PhraseToken('\"phrase\"', 8, '', '\"', 'phrase'),\n                ],\n                'domain:(one)' => [\n                    new WordToken('domain:', 0, '', 'domain:'),\n                    new GroupBeginToken('(', 7, '(', ''),\n                    new WordToken('one', 8, '', 'one'),\n                    new Token(Tokenizer::TOKEN_GROUP_END, ')', 11),\n                ],\n                'some.domain:(one)' => [\n                    new WordToken('some.domain:', 0, '', 'some.domain:'),\n                    new GroupBeginToken('(', 12, '(', ''),\n                    new WordToken('one', 13, '', 'one'),\n                    new Token(Tokenizer::TOKEN_GROUP_END, ')', 16),\n                ],\n            ];\n        }\n    }\n\n    /**\n     * @return \\QueryTranslator\\Languages\\Galach\\TokenExtractor\n     */\n    protected function getTokenExtractor()\n    {\n        return new TokenExtractor\\Text();\n    }\n}\n"
  },
  {
    "path": "tests/Galach/Tokenizer/TokenExtractorTest.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Tests\\Galach\\Tokenizer;\n\nuse PHPUnit\\Framework\\TestCase;\nuse QueryTranslator\\Languages\\Galach\\TokenExtractor;\nuse QueryTranslator\\Languages\\Galach\\TokenExtractor\\Full;\nuse QueryTranslator\\Languages\\Galach\\TokenExtractor\\Text;\nuse QueryTranslator\\Languages\\Galach\\Tokenizer;\nuse RuntimeException;\n\n/**\n * Text case for TokenExtractor.\n */\nclass TokenExtractorTest extends TestCase\n{\n    public function testExtractThrowsExceptionPCRE()\n    {\n        $this->expectException(RuntimeException::class);\n        $this->expectExceptionMessage('PCRE regex error code: 2');\n\n        /** @var \\QueryTranslator\\Languages\\Galach\\TokenExtractor|\\PHPUnit_Framework_MockObject_MockObject $extractor */\n        $extractor = $this->getMockBuilder(TokenExtractor::class)\n            ->setMethods(['getExpressionTypeMap'])\n            ->getMockForAbstractClass();\n\n        $extractor->expects($this->once())\n            ->method('getExpressionTypeMap')\n            ->willReturn(\n                [\n                    '/(?:\\D+|<\\d+>)*[!?]/' => Tokenizer::TOKEN_WHITESPACE,\n                ]\n            );\n\n        $extractor->extract('foobar foobar foobar', 0);\n    }\n\n    public function testFullExtractTermTokenThrowsException()\n    {\n        $this->expectException(RuntimeException::class);\n        $this->expectExceptionMessage('Could not extract term token from the given data');\n\n        $extractor = new Full();\n        $reflectedClass = new \\ReflectionClass($extractor);\n        $reflectedProperty = $reflectedClass->getProperty('expressionTypeMap');\n        $reflectedProperty->setAccessible(true);\n        $reflectedProperty->setValue(\n            null,\n            [\n                '/(?<lexeme>foobar)/' => Tokenizer::TOKEN_TERM,\n            ]\n        );\n\n        $extractor->extract('foobar', 0);\n    }\n\n    public function testTextExtractTermTokenThrowsException()\n    
{\n        $this->expectException(RuntimeException::class);\n        $this->expectExceptionMessage('Could not extract term token from the given data');\n\n        $extractor = new Text();\n        $reflectedClass = new \\ReflectionClass($extractor);\n        $reflectedProperty = $reflectedClass->getProperty('expressionTypeMap');\n        $reflectedProperty->setAccessible(true);\n        $reflectedProperty->setValue(\n            null,\n            [\n                '/(?<lexeme>foobar)/' => Tokenizer::TOKEN_TERM,\n            ]\n        );\n\n        $extractor->extract('foobar', 0);\n    }\n}\n"
  },
  {
    "path": "tests/Galach/Values/NodeTraversalTest.php",
    "content": "<?php\n\nnamespace QueryTranslator\\Tests\\Galach\\Tokenizer;\n\nuse PHPUnit\\Framework\\TestCase;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Group;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalAnd;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalNot;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\LogicalOr;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Mandatory;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Prohibited;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Query;\nuse QueryTranslator\\Languages\\Galach\\Values\\Node\\Term;\nuse QueryTranslator\\Values\\Node;\nuse QueryTranslator\\Values\\Token;\n\n/**\n * Test case for node tree traversal.\n */\nclass NodeTraversalTest extends TestCase\n{\n    public function testGroupNode()\n    {\n        $firstMember = $this->getMockForAbstractClass(Node::class);\n        $secondMember = $this->getMockForAbstractClass(Node::class);\n\n        $nodes = (new Group([$firstMember, $secondMember]))->getNodes();\n\n        $this->assertSame($firstMember, $nodes[0]);\n        $this->assertSame($secondMember, $nodes[1]);\n    }\n\n    public function testLogicalAndNode()\n    {\n        $leftOperand = $this->getMockForAbstractClass(Node::class);\n        $rightOperand = $this->getMockForAbstractClass(Node::class);\n\n        $nodes = (new LogicalAnd($leftOperand, $rightOperand))->getNodes();\n\n        $this->assertSame($leftOperand, $nodes[0]);\n        $this->assertSame($rightOperand, $nodes[1]);\n    }\n\n    public function testLogicalNotNode()\n    {\n        $operand = $this->getMockForAbstractClass(Node::class);\n\n        $nodes = (new LogicalNot($operand))->getNodes();\n\n        $this->assertSame($operand, $nodes[0]);\n    }\n\n    public function testLogicalOrNode()\n    {\n        $leftOperand = $this->getMockForAbstractClass(Node::class);\n        $rightOperand = $this->getMockForAbstractClass(Node::class);\n\n        
$nodes = (new LogicalOr($leftOperand, $rightOperand))->getNodes();\n\n        $this->assertSame($leftOperand, $nodes[0]);\n        $this->assertSame($rightOperand, $nodes[1]);\n    }\n\n    public function testMandatoryNode()\n    {\n        $operand = $this->getMockForAbstractClass(Node::class);\n\n        $nodes = (new Mandatory($operand))->getNodes();\n\n        $this->assertSame($operand, $nodes[0]);\n    }\n\n    public function testProhibitedNode()\n    {\n        $operand = $this->getMockForAbstractClass(Node::class);\n\n        $nodes = (new Prohibited($operand))->getNodes();\n\n        $this->assertSame($operand, $nodes[0]);\n    }\n\n    public function testQueryNode()\n    {\n        $firstMember = $this->getMockForAbstractClass(Node::class);\n        $secondMember = $this->getMockForAbstractClass(Node::class);\n\n        $nodes = (new Query([$firstMember, $secondMember]))->getNodes();\n\n        $this->assertSame($firstMember, $nodes[0]);\n        $this->assertSame($secondMember, $nodes[1]);\n    }\n\n    public function testTermNode()\n    {\n        /** @var \\QueryTranslator\\Values\\Token $token */\n        $token = $this->getMockBuilder(Token::class)->disableOriginalConstructor()->getMock();\n\n        $nodes = (new Term($token))->getNodes();\n\n        $this->assertEmpty($nodes);\n    }\n}\n"
  },
  {
    "path": "tests/bootstrap.php",
    "content": "<?php\n\n$autoload = __DIR__ . '/../vendor/autoload.php';\nif (!file_exists($autoload)) {\n    throw new RuntimeException('Install dependencies using composer to run the test suite.');\n}\n\nrequire_once $autoload;\n"
  }
]