Copy disabled (too large)
Download .txt
Showing preview only (60,413K chars total). Download the full file to get everything.
Repository: openbibleinfo/Bible-Passage-Reference-Parser
Branch: master
Commit: 4eb78e1d56ba
Files: 459
Total size: 55.2 MB
Directory structure:
gitextract_krchq58f/
├── .gitignore
├── LICENSE.md
├── Readme.md
├── bin/
│ ├── 01.add_lang.pl
│ ├── add_cross_lang.pl
│ ├── build_lang.sh
│ ├── fuzz_lang.js
│ ├── letters/
│ │ ├── blocks.txt
│ │ └── letters.txt
│ └── make_regexps.js
├── cjs/
│ ├── ar_bcv_parser.js
│ ├── ascii_bcv_parser.js
│ ├── bg_bcv_parser.js
│ ├── ceb_bcv_parser.js
│ ├── cs_bcv_parser.js
│ ├── da_bcv_parser.js
│ ├── de_bcv_parser.js
│ ├── el_bcv_parser.js
│ ├── en_bcv_parser.js
│ ├── es_bcv_parser.js
│ ├── fa_bcv_parser.js
│ ├── fi_bcv_parser.js
│ ├── fr_bcv_parser.js
│ ├── full_bcv_parser.js
│ ├── he_bcv_parser.js
│ ├── hi_bcv_parser.js
│ ├── hr_bcv_parser.js
│ ├── ht_bcv_parser.js
│ ├── hu_bcv_parser.js
│ ├── id_bcv_parser.js
│ ├── is_bcv_parser.js
│ ├── it_bcv_parser.js
│ ├── ja_bcv_parser.js
│ ├── jv_bcv_parser.js
│ ├── ko_bcv_parser.js
│ ├── la_bcv_parser.js
│ ├── mk_bcv_parser.js
│ ├── mr_bcv_parser.js
│ ├── ne_bcv_parser.js
│ ├── nl_bcv_parser.js
│ ├── no_bcv_parser.js
│ ├── or_bcv_parser.js
│ ├── pa_bcv_parser.js
│ ├── package.json
│ ├── pl_bcv_parser.js
│ ├── pt_bcv_parser.js
│ ├── ro_bcv_parser.js
│ ├── ru_bcv_parser.js
│ ├── sk_bcv_parser.js
│ ├── so_bcv_parser.js
│ ├── sq_bcv_parser.js
│ ├── sr_bcv_parser.js
│ ├── sv_bcv_parser.js
│ ├── sw_bcv_parser.js
│ ├── ta_bcv_parser.js
│ ├── th_bcv_parser.js
│ ├── tl_bcv_parser.js
│ ├── tr_bcv_parser.js
│ ├── uk_bcv_parser.js
│ ├── ur_bcv_parser.js
│ ├── vi_bcv_parser.js
│ └── zh_bcv_parser.js
├── esm/
│ ├── bcv_parser.d.ts
│ ├── bcv_parser.js
│ └── lang/
│ ├── ar.d.ts
│ ├── ar.js
│ ├── ascii.d.ts
│ ├── ascii.js
│ ├── bg.d.ts
│ ├── bg.js
│ ├── ceb.d.ts
│ ├── ceb.js
│ ├── cs.d.ts
│ ├── cs.js
│ ├── da.d.ts
│ ├── da.js
│ ├── de.d.ts
│ ├── de.js
│ ├── el.d.ts
│ ├── el.js
│ ├── en.d.ts
│ ├── en.js
│ ├── es.d.ts
│ ├── es.js
│ ├── fa.d.ts
│ ├── fa.js
│ ├── fi.d.ts
│ ├── fi.js
│ ├── fr.d.ts
│ ├── fr.js
│ ├── full.d.ts
│ ├── full.js
│ ├── he.d.ts
│ ├── he.js
│ ├── hi.d.ts
│ ├── hi.js
│ ├── hr.d.ts
│ ├── hr.js
│ ├── ht.d.ts
│ ├── ht.js
│ ├── hu.d.ts
│ ├── hu.js
│ ├── id.d.ts
│ ├── id.js
│ ├── is.d.ts
│ ├── is.js
│ ├── it.d.ts
│ ├── it.js
│ ├── ja.d.ts
│ ├── ja.js
│ ├── jv.d.ts
│ ├── jv.js
│ ├── ko.d.ts
│ ├── ko.js
│ ├── la.d.ts
│ ├── la.js
│ ├── mk.d.ts
│ ├── mk.js
│ ├── mr.d.ts
│ ├── mr.js
│ ├── ne.d.ts
│ ├── ne.js
│ ├── nl.d.ts
│ ├── nl.js
│ ├── no.d.ts
│ ├── no.js
│ ├── or.d.ts
│ ├── or.js
│ ├── pa.d.ts
│ ├── pa.js
│ ├── pl.d.ts
│ ├── pl.js
│ ├── pt.d.ts
│ ├── pt.js
│ ├── ro.d.ts
│ ├── ro.js
│ ├── ru.d.ts
│ ├── ru.js
│ ├── sk.d.ts
│ ├── sk.js
│ ├── so.d.ts
│ ├── so.js
│ ├── sq.d.ts
│ ├── sq.js
│ ├── sr.d.ts
│ ├── sr.js
│ ├── sv.d.ts
│ ├── sv.js
│ ├── sw.d.ts
│ ├── sw.js
│ ├── ta.d.ts
│ ├── ta.js
│ ├── th.d.ts
│ ├── th.js
│ ├── tl.d.ts
│ ├── tl.js
│ ├── tr.d.ts
│ ├── tr.js
│ ├── uk.d.ts
│ ├── uk.js
│ ├── ur.d.ts
│ ├── ur.js
│ ├── vi.d.ts
│ ├── vi.js
│ ├── zh.d.ts
│ └── zh.js
├── js/
│ ├── ar_bcv_parser.js
│ ├── ascii_bcv_parser.js
│ ├── bg_bcv_parser.js
│ ├── ceb_bcv_parser.js
│ ├── cs_bcv_parser.js
│ ├── da_bcv_parser.js
│ ├── de_bcv_parser.js
│ ├── el_bcv_parser.js
│ ├── en_bcv_parser.js
│ ├── es_bcv_parser.js
│ ├── fi_bcv_parser.js
│ ├── fr_bcv_parser.js
│ ├── full_bcv_parser.js
│ ├── he_bcv_parser.js
│ ├── hi_bcv_parser.js
│ ├── hr_bcv_parser.js
│ ├── ht_bcv_parser.js
│ ├── hu_bcv_parser.js
│ ├── is_bcv_parser.js
│ ├── it_bcv_parser.js
│ ├── ja_bcv_parser.js
│ ├── jv_bcv_parser.js
│ ├── ko_bcv_parser.js
│ ├── la_bcv_parser.js
│ ├── mk_bcv_parser.js
│ ├── mr_bcv_parser.js
│ ├── ne_bcv_parser.js
│ ├── nl_bcv_parser.js
│ ├── no_bcv_parser.js
│ ├── or_bcv_parser.js
│ ├── pa_bcv_parser.js
│ ├── package.json
│ ├── pl_bcv_parser.js
│ ├── pt_bcv_parser.js
│ ├── ro_bcv_parser.js
│ ├── ru_bcv_parser.js
│ ├── sk_bcv_parser.js
│ ├── so_bcv_parser.js
│ ├── sq_bcv_parser.js
│ ├── sr_bcv_parser.js
│ ├── sv_bcv_parser.js
│ ├── sw_bcv_parser.js
│ ├── ta_bcv_parser.js
│ ├── th_bcv_parser.js
│ ├── tl_bcv_parser.js
│ ├── tr_bcv_parser.js
│ ├── uk_bcv_parser.js
│ ├── ur_bcv_parser.js
│ ├── vi_bcv_parser.js
│ └── zh_bcv_parser.js
├── package.json
├── src/
│ ├── ar/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── ascii/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── bg/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── ceb/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── core/
│ │ ├── bcv_grammar.pegjs
│ │ ├── bcv_matcher.ts
│ │ ├── bcv_options.ts
│ │ ├── bcv_parser.ts
│ │ ├── bcv_passage.ts
│ │ ├── bcv_regexps_manager.ts
│ │ ├── bcv_translations_manager.ts
│ │ ├── lang.d.ts
│ │ ├── lang_bundle.ts
│ │ ├── lang_grammar_options.ts
│ │ ├── lang_regexps.ts
│ │ ├── lang_spec.js
│ │ ├── lang_specrunner.html
│ │ ├── lang_translations.ts
│ │ ├── peg_plugin.js
│ │ └── types.d.ts
│ ├── cs/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── da/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── de/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── el/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── en/
│ │ ├── book_names.txt
│ │ ├── data.txt
│ │ ├── psalm_cb.js
│ │ └── translation_additions.js
│ ├── es/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── fa/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── fi/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── fr/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── full/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── he/
│ │ ├── book_names.txt
│ │ ├── data.txt
│ │ ├── spec_additions.js
│ │ └── translation_additions.js
│ ├── hi/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── hr/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── ht/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── hu/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── id/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── is/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── it/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── ja/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── jv/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── ko/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── la/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── mk/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── mr/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── ne/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── nl/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── no/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── or/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── pa/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── pl/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── pt/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── ro/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── ru/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── sk/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── so/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── sq/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── sr/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── sv/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── sw/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── ta/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── template/
│ │ ├── SpecRunner.html
│ │ ├── data.txt
│ │ ├── grammar.pegjs
│ │ ├── regexps.coffee
│ │ ├── research.xlsx
│ │ ├── spec.coffee
│ │ └── translations.coffee
│ ├── th/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── tl/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── tr/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── uk/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── ur/
│ │ ├── book_names.txt
│ │ └── data.txt
│ ├── vi/
│ │ ├── book_names.txt
│ │ └── data.txt
│ └── zh/
│ ├── book_names.txt
│ └── data.txt
├── test/
│ ├── apocrypha.spec.js
│ ├── cjs.spec.cjs
│ ├── compaction.spec.js
│ ├── docs.spec.js
│ ├── existence.spec.js
│ ├── html/
│ │ ├── ar.html
│ │ ├── ascii.html
│ │ ├── bg.html
│ │ ├── ceb.html
│ │ ├── cs.html
│ │ ├── da.html
│ │ ├── de.html
│ │ ├── el.html
│ │ ├── en.html
│ │ ├── es.html
│ │ ├── fa.html
│ │ ├── fi.html
│ │ ├── fr.html
│ │ ├── full.html
│ │ ├── he.html
│ │ ├── hi.html
│ │ ├── hr.html
│ │ ├── ht.html
│ │ ├── hu.html
│ │ ├── id.html
│ │ ├── is.html
│ │ ├── it.html
│ │ ├── ja.html
│ │ ├── jv.html
│ │ ├── ko.html
│ │ ├── la.html
│ │ ├── mk.html
│ │ ├── mr.html
│ │ ├── ne.html
│ │ ├── nl.html
│ │ ├── no.html
│ │ ├── or.html
│ │ ├── pa.html
│ │ ├── pl.html
│ │ ├── pt.html
│ │ ├── ro.html
│ │ ├── ru.html
│ │ ├── sk.html
│ │ ├── so.html
│ │ ├── sq.html
│ │ ├── sr.html
│ │ ├── sv.html
│ │ ├── sw.html
│ │ ├── ta.html
│ │ ├── th.html
│ │ ├── tl.html
│ │ ├── tr.html
│ │ ├── uk.html
│ │ ├── ur.html
│ │ ├── vi.html
│ │ └── zh.html
│ ├── lang/
│ │ ├── ar.spec.js
│ │ ├── ascii.spec.js
│ │ ├── bg.spec.js
│ │ ├── ceb.spec.js
│ │ ├── cs.spec.js
│ │ ├── da.spec.js
│ │ ├── de.spec.js
│ │ ├── el.spec.js
│ │ ├── en.spec.js
│ │ ├── es.spec.js
│ │ ├── fa.spec.js
│ │ ├── fi.spec.js
│ │ ├── fr.spec.js
│ │ ├── full.spec.js
│ │ ├── he.spec.js
│ │ ├── hi.spec.js
│ │ ├── hr.spec.js
│ │ ├── ht.spec.js
│ │ ├── hu.spec.js
│ │ ├── id.spec.js
│ │ ├── is.spec.js
│ │ ├── it.spec.js
│ │ ├── ja.spec.js
│ │ ├── jv.spec.js
│ │ ├── ko.spec.js
│ │ ├── la.spec.js
│ │ ├── mk.spec.js
│ │ ├── mr.spec.js
│ │ ├── ne.spec.js
│ │ ├── nl.spec.js
│ │ ├── no.spec.js
│ │ ├── or.spec.js
│ │ ├── pa.spec.js
│ │ ├── pl.spec.js
│ │ ├── pt.spec.js
│ │ ├── ro.spec.js
│ │ ├── ru.spec.js
│ │ ├── sk.spec.js
│ │ ├── so.spec.js
│ │ ├── sq.spec.js
│ │ ├── sr.spec.js
│ │ ├── sv.spec.js
│ │ ├── sw.spec.js
│ │ ├── ta.spec.js
│ │ ├── th.spec.js
│ │ ├── tl.spec.js
│ │ ├── tr.spec.js
│ │ ├── uk.spec.js
│ │ ├── ur.spec.js
│ │ ├── vi.spec.js
│ │ └── zh.spec.js
│ ├── parse.spec.js
│ ├── preparse.spec.js
│ ├── realworld.spec.js
│ ├── regexps.spec.js
│ └── translations.spec.js
└── tsconfig.json
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
node_modules
================================================
FILE: LICENSE.md
================================================
Copyright (c) 2011-2026 Stephen Smith
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
================================================
FILE: Readme.md
================================================
# Bible Passage Reference Parser
This project is a Typescript implementation of a Bible-passage reference parser (seeing `John 3:16`, for example, and both understanding that it's a Bible reference and converting it into a form that computers can process). It parses Bible **B**ooks, **C**hapters, and **V**erses—thus the file names involving "BCV Parser."
Its primary use is to interpret query strings for use in a Bible application. As such, it is designed to handle typos and ambiguous references. It can extract BCVs from text but may be too aggressive for some uses. (See [Caveats](#caveats).)
It should be fairly speedy for most applications, taking under a millisecond to parse a short string and able to parse about 175KB of reference-heavy text per second on a single core.
The code for a single language occupies about 147KB minified and 30KB gzipped.
This project also provides extensively commented code and 4.7 million real-world strings that you can use as a starting point to build your own BCV parser.
Try a [demo of the Bible passage reference parser](https://www.openbible.info/labs/reference-parser/).
## Upgrade Guide from v2 to v3
First, you don't need to update any of your code. The `/js` folder contains a copy of the v2.0.1 code. It hasn't changed at all and works exactly the same.
But if you do want access to new features, I recommend using the `/esm` version of the code, which uses `import`-style modules and takes an argument to indicate the language you want to support.
CommonJS files (`require`-style) are available in the `/cjs` folder.
Note that the files in `/esm` and `/cjs` require ES2022, which means they'll work in Node 16 and later and browsers released after mid-2021.
## Setup
### In a Browser (`<script>` module)
```html
<script type="module">
import { bcv_parser } from "esm/bcv_parser.js";
import * as lang from "esm/lang/en.js";
const bcv = new bcv_parser(lang);
console.log( bcv.parse("John 1").osis() ); // John.1
</script>
```
Note that no variables are accessible from outside the `<script>` tag. If that presents a problem for you, you could potentially do something like:
```html
<script type="module">
import { bcv_parser } from "esm/bcv_parser.js";
import * as lang from "esm/lang/en.js";
window.bcv = new bcv_parser(lang);
window.dispatchEvent(new Event("bcv_loaded"));
</script>
<script>
// Once the module is loaded, it's accessible as the global `bcv` object.
window.addEventListener("bcv_loaded", () => {
console.log(window.bcv.parse("John 15").osis());
});
</script>
```
### In a Browser (regular `<script>` tag)
```html
<script src="/path/cjs/en_bcv_parser.js" charset="utf-8"></script>
<script>
const bcv = new bcv_parser();
console.log( bcv.parse("John 1").osis() ); // John.1
</script>
```
### Node.js (npm)
To install from the command line:
```shell
npm i bible-passage-reference-parser
```
### Node Usage (v16 and Later)
To run using ES modules (newer style). This style requires using a language object when you create a `new` instance of the parser object.
```javascript
import { bcv_parser } from "bible-passage-reference-parser/esm/bcv_parser.js";
import * as lang from "bible-passage-reference-parser/esm/lang/en.js";
const bcv = new bcv_parser(lang);
console.log( bcv.parse("John 1").osis() ); // John.1
```
### Node Usage (Before v16)
To run using CommonJS:
```javascript
const bcv_parser = require("bible-passage-reference-parser/cjs/en_bcv_parser").bcv_parser;
const bcv = new bcv_parser();
console.log( bcv.parse("John 1").osis() ); // John.1
```
### Node.js (Manual, v16 and Later)
This example uses English. After downloading the parser and the language file you want (assuming you put the language file in a `lang` subfolder):
```javascript
import { bcv_parser } from "./bcv_parser.js"; // Adjust paths as needed
import * as lang from "./lang/en.js";
const bcv = new bcv_parser(lang);
console.log( bcv.parse("John 1").osis() ); // John.1
```
## Parsing
Assuming you have an object named `bcv`:
### `.parse("[string to parse]")`
This function does the parsing. It returns the `bcv` object and is suitable for chaining.
```javascript
bcv.parse("John 3:16"); // Returns the `bcv` object.
```
### `.parse_with_context("[string to parse]", "[string context]")`
This function parses a string with a string context as the second argument. As with `.parse()`, it returns the `bcv` object and is suitable for chaining. Use this function if you have a string that starts with what you suspect is a reference and you already know the context. For example, maybe you're parsing a footnote that refers to "verse 16," and you know that the footnote is attached to John 3:
```javascript
bcv.parse_with_context("verse 16", "John 3"); // Returns the `bcv` object.
bcv.osis(); // "John.3.16"
```
It only matches relevant content at the beginning of the first argument; parsing `chapter 2` will work with context (assuming chapter 2 exists: `bcv.parse_with_context("chapter 2", "Hebrews")`), but not `my favorite chapter is chapter 2`. (In fact, if you `.parse_with_context()` that last string, you'd find that the parser read `is chapter 2` as `Isa.2` because `Is 2` is text that someone could use to refer to Isaiah 2.)
Without this function, you could manually prepend the context to the string, but it could get messy: with the context `John 3:16`, the string `verse 17` would become `John 3:16,verse 17`. Depending on your settings, this string might parse as `John.3.16-John.3.17`, which isn't what you want. `.parse_with_context()` lets you avoid such messiness.
Passing a translation as part of the context—`bcv.parse_with_context("verse 16", "John 3 NIV")`—doesn't apply the translation to the first argument. Translations always propagate backward, not forward (`Matt 5:6 (NIV)` rather than `NIV: Matt 5:6`). You can set the `versification_system` option to change the default translation.
### `.osis()`
This function returns a single OSIS for the entire input, providing no information about any translations included in the input.
```javascript
bcv.parse("John 3:16 NIV").osis();
// "John.3.16"
bcv.parse("John 3:16-17").osis();
// "John.3.16-John.3.17"
bcv.parse("John 3:16,18").osis();
// "John.3.16,John.3.18"
bcv.parse("John 3:16,18. ### Matthew 1 (NIV, ESV)").osis();
// "John.3.16,John.3.18,Matt.1"
```
### `.osis_and_translations()`
This function returns an array. Each element in the array is an `[OSIS, Translation]` tuple (both are strings).
```javascript
bcv.parse("John 3:16 NIV").osis_and_translations();
// [["John.3.16", "NIV"]]
bcv.parse("John 3:16-17").osis_and_translations();
// [["John.3.16-John.3.17", ""]]
bcv.parse("John 3:16,18").osis_and_translations();
// [["John.3.16,John.3.18", ""]]
bcv.parse("John 3:16,18. ### Matthew 1 (NIV, ESV)").osis_and_translations();
// [["John.3.16,John.3.18", ""], ["Matt.1", "NIV,ESV"]]
```
### `.osis_and_indices()`
This function returns an array. Each element in the array is an object with `osis` (a string), `translations` (an array of translation identifiers—an empty string unless a translation is specified), and `indices` (the start and end position in the string). The `indices` key is designed to be consistent with Twitter's implementation (the first character in a string has indices `[0, 1]`). If you're looking to tag references in text, this function is probably the one you want.
```javascript
bcv.parse("John 3:16 NIV").osis_and_indices();
// [{"osis": "John.3.16", "translations": ["NIV"], "indices": [0, 13]}]
bcv.parse("John 3:16-17").osis_and_indices();
// [{"osis": "John.3.16-John.3.17", "translations": [""], "indices": [0, 12]}]
bcv.parse("John 3:16,18").osis_and_indices();
// [{"osis": "John.3.16,John.3.18", "translations": [""], "indices": [0, 12]}]
bcv.parse("John 3:16,18. ### Matthew 1 (NIV, ESV)").osis_and_indices();
// [{"osis": "John.3.16,John.3.18", "translations": [""], "indices":[0, 12]}, {"osis": "Matt.1", "translations": ["NIV","ESV"], "indices": [18, 38]}]
```
### `.parsed_entities()`
If you want to know a lot about how the parser handled the input string, use this function. It can include messages if it adjusted the input or had trouble parsing it (e.g., if given an invalid reference).
You probably do not need to use this function.
This function returns an array with a fairly complicated structure. The `entities` key can contain nested entities if you're parsing a sequence of references.
```javascript
bcv.set_options({"invalid_passage_strategy": "include", "invalid_sequence_strategy": "include"});
bcv.parse("John 3, 99").parsed_entities();
```
Returns:
```javascript
[{ "osis": "John.3",
"indices": [0, 10],
"translations": [""],
"entity_id": 0,
"entities": [{
"osis": "John.3",
"type": "bc",
"indices": [0, 6],
"translations": [""],
"start": { "b": "John", "c": 3, "v": 1 },
"end": { "b": "John", "c": 3, "v": 36 },
"enclosed_indices": [-1, -1],
"entity_id": 0,
"entities": [{
"start": { "b": "John", "c": 3, "v": 1 },
"end": { "b": "John", "c": 3, "v": 36 },
"valid": { "valid": true, "messages": {} },
"type": "bc",
"absolute_indices": [0, 6],
"enclosed_absolute_indices": [-1, -1]
}]
},
{ "osis": "",
"type": "integer",
"indices": [8, 10],
"translations": [""],
"start": { "b": "John", "c": 99 },
"end": { "b": "John", "c": 99 },
"enclosed_indices": [-1, -1],
"entity_id": 0,
"entities": [{
"start": { "b": "John", "c": 99 },
"end": { "b": "John", "c": 99 },
"valid": { "valid": false, "messages": { "start_chapter_not_exist": 21 } },
"type": "integer",
"absolute_indices": [8, 10],
"enclosed_absolute_indices": [-1, -1]
}]
}
]
}]
```
You may also see an `alternates` object if you provide an ambiguous book abbreviation (`Ph 2` could mean "Phil.2" or "Phlm.1.2"; "Phil.2" appears as the main entity, while "Phlm.1.2" appears in `[0].entities[0].entities[0].alternates` in this case).
### `.include_apocrypha([Boolean])`
This function takes a single Boolean value (`true` or `false`). If `true`, it tries to find the following books in the Apocrypha (or Deuterocanonicals): Tob, Jdt, GkEsth, Wis, Sir, Bar, PrAzar, Sus, Bel, SgThree, EpJer, 1Macc, 2Macc, 3Macc, 4Macc, 1Esd, 2Esd, PrMan, Ps151. Your canon may vary in the number of books, their order, or the number of verses in each chapter. If you set the value to `false` (the default behavior), it ignores books in the Apocrypha.
```javascript
bcv.parse("Tobit 1").osis(); // ""
bcv.include_apocrypha(true);
bcv.parse("Tobit 1").osis(); // "Tob.1"
```
You shouldn't call `include_apocrypha()` between calling `parse()` and one of the output functions—the output reflects the value of `include_apocrypha()` that was active during the call to `parse()`. You probably also don't want to call it every time you call `parse()`—it will slow down execution.
You might find it easier to use the `testaments` option to specify which testaments (Old, New, and Apocrypha) you want to identify.
### `.set_options({})`
This function takes an object that sets parsing and output options. See [Options](#options) for available keys and values. This function doesn't enforce valid values, but using values other than the ones described in [Options](#options) will lead to unexpected behavior.
```javascript
bcv.set_options({"osis_compaction_strategy": "bcv"});
bcv.parse("Genesis 1").osis(); // "Gen.1.1-Gen.1.31"
```
## Administrative Functions
The functions in this section are separate from the parsing sequence and provide data that may be useful for other applications.
### `.translation_info("[translation]")`
This function returns an object of data about the requested translation. You can use this data to determine, for example, the previous and next chapters for a given chapter, even when the given chapter is at the beginning or end of a book.
It takes an optional string argument that identifies the translation—if the translation is unknown, it returns data about the default translation. For English, abbreviations that will change the output are: `default`, `vulgate`, `ceb`, `kjv`, `nab` (or `nabre`), `nlt`, `nrsv`, and `nrsvue`. Sending this function the lower-cased translation output from `osis_and_translations()` or `osis_and_indices()` will return the correct translation information.
The returned object has the following structure:
```javascript
{
"alias": "default",
"books": ["Gen", "Exod", "Lev", ...],
"chapters": {"Gen": [31, 25, ...], "Exod": [22, 25, ...], ...},
"order": {"Gen": 1, "Exod": 2, ...},
"system": "default"
}
```
The `system` key identifies which versification is used. For example, `.translation_info("niv")` returns `kjv` for this key because the NIV uses KJV versification. Objects with identical `system` values are identical. `system` is a synonym for `alias`; these two keys are always identical; `alias` is an older way to refer to versification systems.
The `order` key returns the order in which the books appear in the translation, starting at 1.
The `books` key lists the books in order, which you can use to find surrounding books. For example, if you know from `order` that `"Exod": 2`, you know that you can find it at `books[1]` (because the array is zero-based). Similarly, the book before `Exod` is at `books[0]`, and the book after it is at `books[2]`.
The `chapters` key lists the number of verses in each chapter: `chapters["Gen"][0]` tells you how many verses are in Genesis 1. Further, the `length` of each book's array tells you how many chapters are in each book: `chapters["Gen"].length` tells you how many chapters are in Genesis.
## Options
### OSIS Output
* `consecutive_combination_strategy: "combine"`
* `combine`: "Matt 5, 6, 7" → "Matt.5-Matt.7".
* `separate`: "Matt 5, 6, 7" → "Matt.5,Matt.6,Matt.7".
* `osis_compaction_strategy: "b"`
* `b`: OSIS refs get reduced to the shortest possible. "Gen.1.1-Gen.50.26" and "Gen.1-Gen.50" → "Gen", while "Gen.1.1-Gen.2.25" → "Gen.1-Gen.2".
* `bp`: Same as `b` but preserves partial verses when they appear. "Genesis 1:1a-50:26" parses as "Gen.1.1!a-Gen.50.26", while "Genesis 1:1-50:26" still parses as "Gen".
* `bc`: OSIS refs get reduced to complete chapters if possible, but not whole books. "Gen.1.1-Gen.50.26" → "Gen.1-Gen.50".
* `bcp`: Same as `bc` but preserves partial verses when they appear. "Genesis 1:1a-50:26" parses as "Gen.1.1!a-Gen.50.26", while "Genesis 1:1-50:26" still parses as "Gen.1-Gen.50".
* `bcv`: OSIS refs always include the full book, chapter, and verse. "Gen.1" → "Gen.1.1-Gen.1.31".
* `bcvp`: Same as `bcv` but preserves partial verses when they appear. "Gen 1:1a" parses as "Gen.1.1!a", while "Genesis 1:1" still parses as "Gen.1.1". In all these `p` cases, the "partial" indicator is returned exactly as it appears in the text, so non-Latin languages may have non-Latin characters after the `!` character in the OSIS ref.
### Sequence
* `book_sequence_strategy: "ignore"`
* `ignore`: ignore any books on their own in sequences ("Gen Is 1" → "Isa.1").
* `include`: any books that appear on their own get parsed according to `book_alone_strategy` ("Gen Is 1" → "Gen.1-Gen.50,Isa.1" if `book_alone_strategy` is `full` or `ignore`, or "Gen.1,Isa.1" if it's `first_chapter`).
* `invalid_sequence_strategy: "ignore"`
* `ignore`: "Matt 99, Gen 1" sequence index starts at the valid `Gen 1`.
* `include`: "Matt 99, Gen 1" sequence index starts at the invalid `Matt 99`.
* `sequence_combination_strategy: "combine"`
* `combine`: sequential references in the text are combined into a single comma-separated OSIS string: "Gen 1, 3" → `"Gen.1,Gen.3"`.
* `separate`: sequential references in the text are separated into an array of their component parts: "Gen 1, 3" → `["Gen.1", "Gen.3"]`.
* `punctuation_strategy: "us"`
* `us`: commas separate sequences, periods separate chapters and verses. "Matt 1, 2. 4" → "Matt.1,Matt.2.4".
* `eu`: periods separate sequences, commas separate chapters and verses. "Matt 1, 2. 4" → "Matt.1.2,Matt.1.4".
### Potentially Invalid Input
* `invalid_passage_strategy: "ignore"`
* `ignore`: Include only valid passages in `parsed_entities()`.
* `include`: Include invalid passages in `parsed_entities()` (they still don't have OSIS values).
* `non_latin_digits_strategy: "ignore"`
* `ignore`: treat non-Latin digits the same as any other character.
* `replace`: replace non-Latin (0-9) numeric digits with Latin digits. This replacement occurs before any book substitution.
* `passage_existence_strategy: "bcv"`
* Include `b` in the string to validate book order ("Revelation to Genesis" is invalid).
* Include `c` in the string to validate chapter existence. If omitted, strings like "Genesis 51" (which doesn't exist) return as valid. Omitting `c` means that looking up full books will return `999` as the end chapter: "Genesis to Exodus" → "Gen.1-Exod.999".
* Include `v` in the string to validate verse existence. If omitted, strings like `Genesis 1:100` (which doesn't exist) return as valid. Omitting `v` means that looking up full chapters will return `999` as the end verse: "Genesis 1:2 to chapter 3" → "Gen.1.2-Gen.3.999".
* Tested values are `b`, `bc`, `bcv`, `bv`, `c`, `cv`, `v`, and `none`. In all cases, single-chapter books still respond as single-chapter books to allow treating strings like `Obadiah 2` as `Obad.1.2`.
* `zero_chapter_strategy: "error"`
* `error`: zero chapters ("Matthew 0") are invalid.
* `upgrade`: zero chapters are upgraded to 1: "Matthew 0" → "Matt.1".
* Unlike `zero_verse_strategy`, chapter 0 isn't allowed.
* `zero_verse_strategy: "error"`
* `error`: zero verses ("Matthew 5:0") are invalid.
* `upgrade`: zero verses are upgraded to 1: "Matthew 5:0" → "Matt.5.1".
* `allow`: zero verses are kept as-is: "Matthew 5:0" → "Matt.5.0". Some traditions use 0 for Psalm titles.
* `single_chapter_1_strategy: "chapter"`
* `chapter`: treat "Jude 1" as referring to the complete book of Jude: `Jude.1`. People almost always want this output when they enter this text in a search box.
* `verse`: treat "Jude 1" as referring to the first verse in Jude: `Jude.1.1`. If you're parsing specialized text that follows a style guide, you may want to set this option.
### Context
* `book_alone_strategy: "ignore"`
* `ignore`: any books that appear on their own don't get parsed as books ("Gen saw" doesn't trigger a match, but "Gen 1" does).
* `full`: any books that appear on their own get parsed as the complete book ("Gen" → "Gen.1-Gen.50").
* `first_chapter`: any books that appear on their own get parsed as the first chapter ("Gen" → "Gen.1").
* `book_range_strategy: "ignore"`
* `ignore`: any books that appear on their own in a range are ignored ("Matt-Mark 2" → "Mark.2").
* `include`: any books that appear on their own in a range are included as part of the range ("Matt-Mark 2" → "Matt.1-Mark.2", while "Matt 2-Mark" → "Matt.2-Mark.16").
* `captive_end_digits_strategy: "delete"`
* `delete`: remove any digits at the end of a sequence that are preceded by spaces and immediately followed by a `\w`: "Matt 5 1Hi" → "Matt.5". This is better for text extraction.
* `include`: keep any digits at the end of a sequence that are preceded by spaces and immediately followed by a `\w`: "Matt 5 1Hi" → "Matt.5.1". This is better for query parsing.
* `end_range_digits_strategy: "verse"`
* `verse`: treat "Jer 33-11" as "Jer.33.11" (end before start) and "Heb 13-15" as "Heb.13.15" (end range too high).
* `sequence`: treat them as sequences ("Jer 33-11" → "Jer.33,Jer.11", "Heb 13-15" → "Heb.13").
### Testaments
* `testaments: "on"`
* `o`: include `o` in the value to look for Old Testament books (Genesis to Malachi in the Protestant canon).
* `n`: include `n` in the value to look for New Testament books (Matthew to Revelation in the Protestant canon).
* `a`: include `a` in the value to look for books in the Apocrypha. Calling `include_apocrypha(true)` simply adds an `a` to this value, while calling `include_apocrypha(false)` removes it. The next values are all combinations of these three primitives.
* `on` includes the Old and New Testaments.
* `ona` includes the Old and New Testaments and the Apocrypha.
* `oa` includes the Old Testament and the Apocrypha.
* `na` includes the New Testament and the Apocrypha.
* `ps151_strategy: "c"`
* `c`: treat references to Psalm 151 (if using the Apocrypha) as a chapter: "Psalm 151:1" → "Ps.151.1"
* `b`: treat references to Psalm 151 (if using the Apocrypha) as a book: "Psalm 151:1" → "Ps151.1.1". Be aware that for ranges starting or ending in Psalm 151, you'll get two OSISes, regardless of the `sequence_combination_strategy`: "Psalms 149-151" → "Ps.149-Ps.150,Ps151.1". Setting this option to `b` is the only way to correctly parse OSISes that treat `Ps151` as a book.
### Versification
* `versification_system: "default"`
* `default`: the default ESV-style versification. Also used in AMP and NASB.
* `ceb`: use CEB versification, which varies mostly in the Apocrypha.
* `csb`: use CSB versification, which differs in two New Testament books.
* `kjv`: use KJV versification, with one fewer verse in 3John. Also used in NIV and NKJV.
* `nab`: use NABRE versification, which generally follows the Septuagint.
* `nlt`: use NLT versification, with one extra verse in Rev. Also used in NCV.
* `nrsv`: use NRSV versification.
* `nrsvue`: use NRSVUE versification.
* `vulgate`: use Vulgate numbering for the Psalms.
### Case Sensitivity
* `case_sensitive: "none"`
* `none`: All matches are case-insensitive.
* `books`: Book names are case-sensitive. Everything else is still case-insensitive.
* `translations`: Translation identifiers (such as "KJV") are case-sensitive.
* `books,translations`: Book names and translation identifiers are case-sensitive. Everything else (such as the word "verse" if it occurs in the text) is still matched case-insensitively.
### Warnings
* `warning_level: "none"`
* `none`: Don't use `console.warn`.
* `warn`: Send `console.warn` messages when setting an unknown `versification_system` or `punctuation_strategy`, getting unknown `translation_info()`, or redefining an existing translation in `add_translations()`.
### Grammar
You can set `grammar` with the below keys.
This object controls runtime behavior of the grammar so that you can override certain patterns with a custom regular expression. For example, maybe you never want to use `.` as a chapter-verse separator because your style requires a `:` instead. Here's how you'd do that:
```javascript
bcv.parse("John 3.16").osis(); // John.3.16
bcv.set_options({
grammar: {
cv_sep_us: /^:/
}
});
bcv.parse("John 3.16").osis(); // John.3,John.16
```
Here the "16" gets parsed as a chapter because `.` is a valid sequence separator. To fully get what you're (probably) looking for, try the following. Here the parsing stops after the "3" because "." is no longer a valid character to parse.
```javascript
bcv.set_options({
grammar: {
cv_sep_us: /^:/,
sequence_us: /^(?:[,;]|\s*and\s*)+/
}
});
bcv.parse("John 3.16").osis(); // John.3
```
Here are the valid keys for this object:
1. `ab`: Partial verses (the "a" in "John 3:16a").
2. `and`: The last item in a sequence ("John 3:16 and 17").
3. `c_explicit`: An explicit chapter reference (the "chapters" in "John 3:2, chapters 1 and 2").
4. `c_sep_eu`: A separator to indicate that what follows is a new chapter when the `eu` punctuation strategy is active, even if it otherwise looks like a verse (if set appropriately, the "; " in "John 3:1; 5").
5. `c_sep_us`: The same as `c_sep_eu` but when the `us` `punctuation_strategy` is active.
6. `cv_sep_weak`: A chapter-verse separator that can be overridden based on context (the space in "John 3 1").
7. `cv_sep_eu`: The chapter-verse separator to use when the `eu` `punctuation_strategy` is active (the "," in "John 3, 16").
8. `cv_sep_us`: The chapter-verse separator to use when the `us` `punctuation_strategy` is active (the ":" in "John 3:16").
9. `ff`: Short for "and following," used to indicate a range through the end of the current chapter or book, depending on context (the "ff" in "John 3:16ff").
10. `in_book_of`: Used in contexts like "the 3rd chapter from the book of John". This has to be set up for the language at compile time and won't do anything for you.
11. `next`: Appears in some languages to indicate the immediate next verse or chapter. Not used in English, but conceptually similar to `ff`.
12. `ordinal`: Used with `in_book_of`. It also has to be set up for the language at compile time and won't do anything for you.
13. `range`: A range of verses or chapters (the "-" in "John 3:16-17").
14. `sequence_eu`: The sequence separator to use when the `eu` `punctuation_strategy` is active (the "." in ("John 3,16. 17")).
15. `sequence_us`: The sequence separator to use when the `us` `punctuation_strategy` is active (the "," in ("John 3.16, 17")).
16. `space`: Characters to use as a space. Includes an asterisk by default because people in practice sometimes use asterisks for spaces.
17. `title`: A psalm title (the "title" in "Psalm 3, title").
18. `v_explicit`: An explicit verse reference (the "verse" in "John 3 verse 16").
The RegExp you provide must always start with a `^` to match the beginning of a string. If you have multiple alternates, each one should be anchored with a `^` (or, better, do something like `/^(?:pattern1|pattern2)/`). You can use any valid regular expression, though it's possible to significantly degrade performance with complex ones.
If you use a pattern that includes a character that wasn't included at compile time, it probably won't match your pattern. This limitation is a known issue for version 3.1.0; it may change in the future.
You're overriding the existing patterns entirely. For example, if you set `sequence_us` to `/^,/`, and someone enters "John 3:16 and 17", the "and 17" won't match.
If you set overlapping patterns (e.g., changing `ab` so that it includes "f", which is also used for `ff`), the precedence won't necessarily be predictable and may not produce the output you want.
You can use this pattern to guarantee that something will never match: `/^\x1f\x1f\x1f/`.
## Messages
If you're calling `parsed_entities()` directly, the following keys can appear in `messages`; they don't always indicate an invalid reference; they may just indicate the chosen parsing strategy.
### Start Objects
* `start_book_not_defined`: `true` if a `c` or similar non-book object is lacking a book context. This message only occurs when the object becomes dissociated from the related book, as in `Chapters 11-1040 of II Kings`. It's highly unusual.
* `start_book_not_exist`: `true` if the given book doesn't exist in the translation. A book has to be omitted from the translation's definition to generate this message.
* `start_chapter_is_zero`: `1` if the requested start chapter is 0.
* `start_chapter_not_exist`: The value is the last valid chapter in the book.
* `start_chapter_not_exist_in_single_chapter_book`: `1` if wanting, say, `Philemon 2`. It is reparsed as a verse (`Philemon 1:2`).
* `start_verse_is_zero`: `1` if the requested start verse is 0.
* `start_verse_not_exist`: The value is the last valid verse in the chapter.
### End Objects
* `end_book_before_start`: `true` if the end book is before the start book (the order depends on the translation being used). E.g., `Exodus-Genesis`.
* `end_book_not_exist`: `true` if the given book doesn't exist in the translation. A book has to be omitted from the translation's definition to generate this message.
* `end_chapter_before_start`: `true` if the end chapter is before the start chapter in the same book.
* `end_chapter_is_zero`: `1` if the requested end chapter is `0`. The `1` indicates the first valid chapter.
* `end_chapter_not_exist`: The value is the last valid chapter in the book.
* `end_chapter_not_exist_in_single_chapter_book`: `1` if wanting, say, `Philemon 2-3`. It is reparsed as a verse (`Philemon 1:2-3`).
* `end_verse_before_start`: `true` if the end verse is before the start verse in the same book and chapter.
* `end_verse_is_zero`: `1` if the requested end verse is `0`. The `1` indicates the first valid verse.
* `end_verse_not_exist`: The value is the last valid verse in the chapter.
### Translation Objects
* `translation_invalid`: `[]` if an invalid translation sequence appears. Each item in the array is a `translation` object.
* `translation_unknown`: `[]` if the translation is unknown. If you see this message, a translation exists in `bcv_parser.regexps.translations` but not in `bcv_parser.translations`. Each item in the array is a `translation` object.
## Adding New Book Patterns
The `.add_books()` function lets you add new patterns to find books in text. Here's an example; let's say you want to allow "Marco" and "Mrc" to be parsed by the English parser:
```javascript
const bcv = new bcv_parser(lang);
bcv.parse("Marco 1").osis(); // No result.
bcv.add_books({books: [{ // `books` is always an array of objects.
osis: ["Mark"], // An array of OSIS book names that you want the pattern to match.
regexp: /Marco|Mrc/ // The regular expression. You don't need to provide bounding characters.
}]});
bcv.parse("Marco 1").osis(); // Mark.1
bcv.parse("Mrc 1").osis(); // Mark.1
```
Unlike most other functions, this one will throw an error if anything's not quite right with the input.
You probably want to [NFC-normalize](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/normalize) any patterns before adding them so that they're consistent with built-in patterns.
The books that the parser will find are governed by the `testaments` setting. If you have it set to `o` (so you only find books in the Old Testament) but create a pattern for a New Testament book, your pattern won't match until you set `testaments` to contain an `n`.
Here are the keys you can set in each object in the array:
* `osis`: This is an array of OSIS book names that you want your pattern to match. Typically you only want to match one, but some abbreviations, like "Ma" in English, can match multiple books. The parser will prefer the first valid one. If you try to parse `Ma 28` with the `osis: ["Mal", "Matt"]`, it will pick `Matt` because `Mal` doesn't have 28 chapters (assuming you have a `bc` or `bcv` `passage_existence_strategy`).
* `regexp`: This is the RegExp that will be used to match. Any flags you set are ignored; ultimately, it'll end up with `giu` flags set. It's possible to write non-performant regular expressions; it's up to you to ensure they meet your needs.
* `insert_at`: A string to indicate the order you want your pattern parsed in compared to other books.
* It defaults to `start`, meaning that your patterns will be parsed before any others.
* You can also set it to `end` (to parse it after everything else). For example, maybe you want to be sure that your new `/Corinthians/` pattern is always parsed after 1 and 2 Corinthians so that it doesn't eat up valid longer patterns.
* You can also provide it an OSIS string, like `Matt`. That will ensure that your pattern is inserted just before the first pattern that matches Matthew. Note that the order of the regular expressions isn't necessarily predictable: generally patterns for `2Cor` are parsed before `1Cor`, for example. You can also specify a pattern that matches multiple books by comma-separating them: `Matt,Mal` will match only a pattern that matches both books. In practice, you probably don't want to do that.
* `pre_regexp`: Normally, the book patterns you provide are bounded by other RegExps to ensure that we don't lift out potential book matches from the middle of words. You can use this key to assign your own RegExp. Importantly, it shouldn't use any capturing groups (`(...)`). It also should only consist of zero-width assertions (like negative lookbehinds, `\b`, or `^` anchors). If your pattern gobbles text, it will throw off the parser.
* `post_regexp`. Similarly, you can provide a pattern for after the book. Here it's also important not to gobble any characters, so you should only use zero-width assertions. If you set either `pre_regexp` or `post_regexp`, you probably want to test extensively. Because the RegExps have the `u` flag, you can use `\p` classes for bounding. For example, `(?=[^\p{L}])` asserts that the next character isn't a letter.
## Adding New Translations
The `.add_translations()` function lets you define new translations. Let's say you want to define an "NIV1984" translation for the parser to find in the text you provide. The NIV1984 uses the same versification system as the NIV (2011), which is "kjv" (since the KJV and the NIV have the same number of chapters and verses in each book):
```javascript
// Here the parser identifies Mark 1 but not the translation.
bcv.parse("Mark 1 (NIV1984)").osis_and_translations(); // [["Mark.1", ""]]
bcv.add_translations({
translations: [{ text: "NIV1984", system: "kjv" }]
});
bcv.parse("Mark 1 (NIV1984)").osis_and_translations(); // [["Mark.1", "NIV1984"]]
// This verse exists in the default versification but not in the NIV1984 or the KJV.
bcv.parse("3 John 15 (NIV1984)").osis_and_translations(); // []
```
It's also possible to define a custom versification system. The following defines the first and only book in the system to be `Matt` (but note that any undefined books are added to the end), and it defines `Matt` to have only one chapter with 10 verses. The value in `system` should match a key in `systems` if you're defining a custom one. The `osis` key lets you define what gets reported back out to you.
```javascript
bcv.add_translations({
translations: [{ text: "ONLYMATT", osis: "MATTHEWTRANSLATION", system: "custom1" }],
systems: {
custom1: {
books: ["Matt"],
chapters: {
"Matt": [10]
}
}
}
});
bcv.parse("Matt 1:2 ONLYMATT").osis_and_translations(); // [["Matt.1.2", "MATTHEWTRANSLATION"]]
```
As with `add_books()`, you can define `pre_regexp` and `post_regexp` at the top level of the object (not for individual translations). As in `add_books()`, the patterns you use should not consume any characters.
This function will throw an error if something isn't right in the data you've sent.
## Unicode
If you're dealing with non-ASCII characters, you probably want to [NFC-normalize](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/normalize) your input before sending it to the parser. All the built-in patterns are normalized with the NFC algorithm. The parser doesn't perform this normalization for you because it can affect the length of your input, which in turn would affect the offsets. If you need to, you can always perform NFD normalization after you're done parsing.
## Caveats
The parser is quite aggressive in identifying text as Bible references; if you just hand it raw text, you will probably encounter false positives, where the parser identifies text as Bible references even when it isn't. For example, in the string `she is 2 cool`, the `is 2` is parsed as `Isa.2`.
The parser spends most of its time doing regular expressions and manipulating strings. If you give it a very long string full of Bible references, it could block your main event loop. Depending on your performance requirements, parsing large numbers of even short strings could saturate your CPU and lead to problems in the rest of your app.
In addition, a number of the tests in the "real-world" section of [`test/realworld.spec.js`](https://github.com/openbibleinfo/Bible-Passage-Reference-Parser/blob/master/test/realworld.spec.js) have comments describing limitations of the parser. Unfortunately, it's hard to solve them without incorrectly parsing other cases—one person intends `Matt 1, 3` to mean `Matt.1,Matt.3`, while another intends it to mean `Matt.1.3`.
## Tests
One of the hardest parts of building a BCV parser is finding data to test it on to tease out corner cases. The [`test`](https://github.com/openbibleinfo/Bible-Passage-Reference-Parser/blob/master/test) folder has over 3,700 tests that illustrate the range of input that this parser can handle.
Separate from this repository are four data files that you can use to test your own parser. Derived from Twitter and Facebook mentions of Bible references, the dataset reflects how people really type references in English. It includes 4.7 million unique strings across 180 million total mentions. (For example, the most-popular string, "Philippians 4:13", is mentioned over 1.3 million times.)
1. [10+ mentions in the dataset](https://a.openbible.info/data/bcv-parser/10plus.zip). 465,000 unique strings, 4 MB. If you're just beginning to develop your own parser and are looking for raw data, start with this file.
2. [3-9 mentions in the dataset](https://a.openbible.info/data/bcv-parser/3-9.zip). 818,000 unique strings, 7 MB.
3. [2 mentions in the dataset](https://a.openbible.info/data/bcv-parser/2.zip). 743,000 unique strings, 7 MB.
4. [1 mention in the dataset](https://a.openbible.info/data/bcv-parser/1.zip). 2.7 million unique strings, 25 MB. This file contains strings that only appear once in the corpus.
The tests are arranged in three columns:
1. `Popularity` is the number of times the text appears in the corpus. You can use this column as a way to prioritize how to handle corner cases.
2. `Text` is the raw text of the reference. Tabs and newline characters (`[\t\r\n]`) are converted to spaces; otherwise they appear unaltered from their source.
3. `OSIS` is the OSIS value of the text as parsed by this BCV Parser. If one or more translations appears, it precedes a colon at the start of the string. For example: `Matt 5, 7, NIV, ESV` has an OSIS value of `NIV,ESV:Matt.5,Matt.7`. Otherwise, the OSIS consists only of OSIS references separated by commas. You may choose to interpret certain cases differently to suit your needs, but this column gives you a reasonable starting point from which to validate your parser.
This dataset has a few limitations:
1. It's self-selecting in that it only includes content that this BCV parser understands.
2. It doesn't include as many misspellings as you'd expect because the queries used to retrieve the data only use correct spellings. Misspellings that do occur are incidental—they're part of content that otherwise includes a non-misspelled book name.
3. Its coverage of Deuterocanonical books is very limited; as with misspellings, the queries used to retrieve the data don't include books from the Apocrypha.
4. It doesn't include context that could change the interpretation of the string.
5. Sequences interrupted by translation identifiers are separated: the parsing of `Matt 1 NIV Matt 2 KJV` appears in two separate lines.
6. It's only in English.
## OSIS
[OSIS](https://crosswire.org/osis/) is a system for marking up Bibles in XML. The BCV parser only borrows the OSIS system for [book abbreviations](http://www.crosswire.org/wiki/OSIS_Book_Abbreviations) and references. You can control the OSIS specificity using the `osis_compaction_strategy` option. I like OSIS references because, programmatically, they're easy to handle: they always include a book name along with optional chapters, verses, and partial verses, depending on the level of specificity you need.
The parser emits `GkEsth` for Greek Esther rather than just `Esth`. It can include `Ps151` as part of the Psalms (`Ps.151.1`)—the default—or as its own book (`Ps151.1.1`), depending on the `ps151_strategy` option.
<table>
<tr><th>Input</th><th>OSIS</th></tr>
<tr><td><code>John</code></td><td><code>John</code> or <code>John.1-John.21</code> or <code>John.1.1-John.21.25</code></td></tr>
<tr><td><code>John-Acts</code></td><td><code>John-Acts</code> or <code>John.1-Acts.28</code> or <code>John.1.1-Acts.28.31</code></td></tr>
<tr><td><code>John 3</code></td><td><code>John.3</code> or <code>John.3.1-John.3.36</code></td></tr>
<tr><td><code>John 3:16</code></td><td><code>John.3.16</code></td></tr>
<tr><td><code>John 3:16-17</code></td><td><code>John.3.16-John.3.17</code></td></tr>
<tr><td><code>John 3:16-4:1 and 4:2-5a</code></td><td><code>John.3.16-John.4.1,John.4.2-John.4.5!a</code></td></tr>
</table>
## Program Flow
This section describes the parsing of a typical string:
```javascript
const bcv = new bcv_parser(lang); // Declare the object
bcv.parse("John 3:16"); // Do the parsing
console.log(bcv.osis()); // "John.3.16"
```
### Matching Potential Passages
The `bcv.parse()` function accepts a string. It first replaces any reserved characters that we're going to need later in the program without affecting any of the character indices.
Then it runs through all the regexps for Bible books (`match_books()`). In this case, it matches the `John` part of the string and replaces it with the characters `\x1f0\x1f`. The two `\x1f` characters provide boundaries for the match, and the `0` matches an index in the `books` array we're using to keep track of the original string and some metadata. (If there were more books, they would be `\x1f1\x1f`, `\x1f2\x1f`, etc.) These books aren't necessarily replaced in the order they appear in the string, but rather in the precedence order specified in `regexps.books`—we want to parse `1 John` before `John` so that the program doesn't interpret the `John` in `1 John` as being a separate book. In other words, match longer books first.
Once it has matched all the possible books in the string, we call `match_passages()` to identify complete passages—we want to be sure to treat strings like `John 3:16, 17` as a single sequence. The `regexps.escaped_passage` used for these matches is fairly complicated. It looks for some unusual cases (`chapter 23 of Matthew`) first, but it pivots around the escaped book sequence from `match_books()`: it tries to find numbers and other characters that can comprise a valid sequence after a book (including other books). We know that we'll probably have to trim some of what it finds later; at this point, we want to be as comprehensive as possible.
For each match, we trim some unnecessary parts from the end of it and then run it through the grammar file that identifies the components of the string (in this case, `John 3:16` fits the pattern of a `bcv`, or book-chapter-verse). The grammar uses [Peggy](https://peggyjs.org/), a [parsing expression grammar](https://en.wikipedia.org/wiki/Parsing_expression_grammar) with a DSL that compiles to Javascript. A PEG provides predictable performance, especially for shorter strings like Bible references. The grammar identifies components in the match and, importantly, records the indices of where each component starts and ends in the string. Peggy's built-in extension mechanism provides an easy way to output the necessary data. The tradeoff of using a PEG arrives in the form of increased code size: around half the code in the minified file comes from the auto-generated grammar.
We also look here for a corner case of the format `1-2 Samuel`, where the book range precedes the book name. If it exists, we construct an object to use later.
After the regexp has found all the matches in the string (and the grammar has taken a pass at them), we return to `parse`, which loops through the results, sending each one in turn to the `bcv_passage` object.
### Interpreting Grammar Results
The `bcv_passage` object is responsible for the bulk of the heavy lifting in interpreting the output of the grammar. Most of its functions correspond to types (such as `bcv`) returned from the grammar. These functions accept three arguments: a `passage` that reflects the output from the grammar, an `accum` that reflects the processing results thus far, and a `context` that reflects the current processing state—if a function sees a `16` and knows that the context is `John.3`, it can interpret the `16` as a verse number rather than, say, `John.16`. These functions don't alter global state and are safe to run any number of times over the content, a situation that can happen if the initial parsing strategy doesn't work out.
In the case of a `bcv`, the `passage` object consists of two values: a `bc` (the book-chapter combination) and a `v` (the verse number). Since a new book renders any existing context unnecessary, we first get rid of the existing context. We then loop through the possible book values—usually there's only one, but an ambiguous book abbreviation like `Ph` (`Phil` or `Phlm`) can have more than one—to find valid references. For example, given `Ph 20`, we know that only Philemon fits the bill (`Phlm.1.20`) since there's no chapter 20 in Philippians. Much of the logic in functions dealing with books revolves around this process of identifying valid passages.
Once we've identified a viable book, we record the position of the match in the original string, set the `context` for any future processing, and move on. In the case of `John 3:16`, we're done and head back up to `parse`.
### Ranges
The `bcv` function is fairly straightforward—the logic doesn't get too convoluted. Much of the processing complexity in the parser arises from dealing with ranges that have errors in them or are ambiguous. The basic principle is that end ranges that go beyond the valid end of a book or a chapter are OK—people are often imprecise when it comes to remembering how many chapters are in a book or verses are in a chapter. Four tricky cases arise fairly often, however.
The first tricky case comes from people who like to use hyphens in ways that don't just indicate ranges. For example, the string `Hebrews 13-15` (Hebrews has thirteen chapters) most likely means `Hebrews 13:15`. In some cases, we can guess that that's the case and correct our interpretation. The algorithm the program uses asks whether the end chapter is too high—and if it is, whether the end chapter could be a valid start verse. If so, it proceeds as though that's the case.
The second tricky case arises from strings like `John 10:22-42 vs 27`. In this case, the grammar has indicated that `42 vs 27` is a `cv`, or chapter-verse (in other words, `John.10.22-John.42.27`). However, when the purported end chapter doesn't exist, it makes more sense to treat it as a sequence: `John.10.22-John.10.42,John.10.27`.
The third tricky case stems from strings like `Psalm 123-24`. The grammar output suggests that we should interpret this range as invalid: `Ps.123-Ps.24`. Instead, we choose to interpret it as `Ps.123-Ps.124`. This approach can be aggressive at times: does `Psalm 15-6` really mean `Ps.15-Ps.16`?
The fourth tricky case resembles the first one: `Jeremiah 33-11` isn't the invalid range `Jer.33-Jer.11` but rather the `bcv` `Jer.33.11`.
If we still couldn't make sense of the range, then we treat it as a sequence of verses instead of a range: `Psalm 120-119` becomes `Ps.120,Ps.119`.
### Translations
Translations are complicated because they propagate backward, whereas passage context propagates forward: `Matt 2 (KJV), Eph 6 (NIV)` means that the KJV should apply to Matthew 2, while the NIV should apply to Ephesians 6.
In theory, some translations could have different books or chapter/verse counts, so if we've made assumptions up to this point that, say, certain chapters have a specific number of verses, we may need to revisit those assumptions. Therefore, we reprocess everything we've already seen.
### Generating Output
With the `bcv_passage` processing complete, we exit the `parse()` function; you can now ask for the results in the format that's convenient for you.
All the output functions call `parsed_entities()`. This function loops through the results from `bcv_passage`, constructing an array of objects that other functions can draw from. This function ignores entities you're not interested in and adjusts indices to exclude some entities. For example, you may not want the `Ex` in `Hab 2 Ex`. (You can control this behavior using the options.) Most of the logic involves getting the indices right in corner cases.
This function also creates OSIS strings and can combine consecutive references into a single range (e.g., `John.1,John.2` becomes `John.1-John.2`).
You're probably not calling this function directly but instead are using `osis()` or `osis_and_indices()`, detailed above.
## Performance
Performance degrades with the number of passages found in a string. You can generally expect to parse over 100 KB per second.
## Alternate Versification Systems
The BCV parser supports several versification systems (described above). The appropriate versification system kicks in if the parsed text explicitly mentions a translation with an alternate versification system, or you can use `set_options({"versification_system":"..."})`. You can extend the relevant `translation_additions.js` to add additional ones (though the build process overwrites this file; you may be better off adding them in the `data.txt` for your language of interest).
You can also add new versification systems and translations at runtime using `.add_translations()`.
## Non-English Support
Each file in `esm/lang` provides support for additional languages.
### Supported Languages
Most of these languages are in [Google Translate](https://translate.google.com/).
<table>
<tr><th>Prefix</th><th>Language</th></tr>
<tr><td>ar</td><td>Arabic</td></tr>
<tr><td>bg</td><td>Bulgarian</td></tr>
<tr><td>ceb</td><td>Cebuano</td></tr>
<tr><td>cs</td><td>Czech</td></tr>
<tr><td>cy</td><td>Welsh</td></tr>
<tr><td>da</td><td>Danish</td></tr>
<tr><td>de</td><td>German</td></tr>
<tr><td>el</td><td>Greek (mostly ancient)</td></tr>
<tr><td>en</td><td>English</td></tr>
<tr><td>es</td><td>Spanish</td></tr>
<tr><td>fa</td><td>Farsi</td></tr>
<tr><td>fi</td><td>Finnish</td></tr>
<tr><td>fr</td><td>French</td></tr>
<tr><td>he</td><td>Hebrew</td></tr>
<tr><td>hi</td><td>Hindi</td></tr>
<tr><td>hr</td><td>Croatian</td></tr>
<tr><td>ht</td><td>Haitian Creole</td></tr>
<tr><td>hu</td><td>Hungarian</td></tr>
<tr><td>id</td><td>Indonesian</td></tr>
<tr><td>is</td><td>Icelandic</td></tr>
<tr><td>it</td><td>Italian</td></tr>
<tr><td>ja</td><td>Japanese</td></tr>
<tr><td>jv</td><td>Javanese</td></tr>
<tr><td>ko</td><td>Korean</td></tr>
<tr><td>la</td><td>Latin</td></tr>
<tr><td>mk</td><td>Macedonian</td></tr>
<tr><td>mr</td><td>Marathi</td></tr>
<tr><td>ne</td><td>Nepali</td></tr>
<tr><td>nl</td><td>Dutch</td></tr>
<tr><td>no</td><td>Norwegian</td></tr>
<tr><td>or</td><td>Oriya</td></tr>
<tr><td>pa</td><td>Punjabi</td></tr>
<tr><td>pl</td><td>Polish</td></tr>
<tr><td>pt</td><td>Portuguese</td></tr>
<tr><td>ro</td><td>Romanian</td></tr>
<tr><td>ru</td><td>Russian</td></tr>
<tr><td>sk</td><td>Slovak</td></tr>
<tr><td>so</td><td>Somali</td></tr>
<tr><td>sq</td><td>Albanian</td></tr>
<tr><td>sr</td><td>Serbian</td></tr>
<tr><td>sv</td><td>Swedish</td></tr>
<tr><td>sw</td><td>Swahili</td></tr>
<tr><td>ta</td><td>Tamil</td></tr>
<tr><td>th</td><td>Thai</td></tr>
<tr><td>tl</td><td>Tagalog</td></tr>
<tr><td>tr</td><td>Turkish</td></tr>
<tr><td>uk</td><td>Ukrainian</td></tr>
<tr><td>ur</td><td>Urdu</td></tr>
<tr><td>vi</td><td>Vietnamese</td></tr>
<tr><td>zh</td><td>Chinese (both traditional and simplified)</td></tr>
</table>
When parsing a language that doesn't use Latin-based numbers (0-9), you probably want to set the `non_latin_digits_strategy` option to `replace`.
When using `<script>`s on the web, be sure to serve them with the `utf-8` character set—all files contain raw UTF-8 characters. The safest way to ensure the right character set is to include the `charset` attribute on the `<script>` tag:
```html
<script src="bcv_parser.js" charset="utf-8"></script>
```
### Cross-Language Support
Two files in `esm/lang` provide support for identifying translations in multiple languages at one time (e.g., "Matthew 2, Juan 1"). You can use this support if you don't know ahead of time what language someone might be using.
The files are:
1. `ascii.js`. Only supports characters in the set `[\x00-\x7f\u2000-\u206F]` (ASCII characters and certain punctuation marks like em-dashes). It runs about 12% slower than `en.js`, parsing around 140KB per second in the fuzz tester.
2. `full.js`. Parse book names across all languages. It runs about 30% slower than `en.js`, parsing around 110KB per second in the fuzz tester.
Some features, such as psalm titles, are still English-only, even in these cross-language files.
Executing `bin/add_cross_lang.pl full` or `bin/add_cross_lang.pl ascii` will recompile the needed source files. You can then compile the files as usual using the [build instructions](#building).
## Compatibility
These files work in any environment that supports ES2022; any browsers released since mid-2021 should support ES2022:
* Chrome 74+
* Safari 14.1+ (14.5+ on iOS)
* Edge 79+
* Firefox 90+
* Node 16+
The `js` folder contains an older version of this code (2.0.1) and supports much older browsers, back to IE8, Firefox 12, Chrome 19, and Node 0.10. The code in this folder is no longer maintained.
## Building
The BCV Parser uses the following projects (none of them is necessary unless you want to edit the source files or run tests) as dev dependencies:
* [esbuild](https://esbuild.github.io/) to package the files.
* [Jasmine 5.5.0](https://jasmine.github.io/) for the testing framework.
* [Peggy](https://github.com/peggyjs/peggy) for the parsing grammar.
* [Grex](https://github.com/pemistahl/grex) for optimizing generated regular expressions.
### Adding a Language
#### Create a Folder
In `src`, create a folder named after the [ISO 639 code](https://www.loc.gov/standards/iso639-2/php/code_list.php) of the desired language. For example: `fr`.
#### Create Data Files
Create a data.txt file inside that folder. I recommend copying the existing `data.txt` from `src/template`. The `research.xlsx` file in that folder can help you organize your work; you can copy/paste from that spreadsheet into the `data.txt` file.
Points to know:
1. Lines that start with `#` are comments.
2. Lines that start with `$` are variables.
1. `$FIRST`, `$SECOND`, `$THIRD`, and `$FOURTH` are helpful for reducing redundancy in book names. For example, if you define `1` and `I` as values for `$FIRST`, then you can just write `$FIRST Samuel` and `$FIRST Corinthians` instead of repeating yourself for each book.
2. `$GOSPEL` is helpful to reduce verbosity for names like "The Gospel according to Matthew".
3. `$AB` is used by the parser to determine partial verses (like "Genesis 1:1a"). You probably want to avoid overlap with `$FF`. It's required.
4. `$AND` is used by the parser for sequences. For example, in English, you'd define `and` here, while in Spanish, you'd define `y`. It's also useful for common expressions like `see also`. If you don't have a value for this, you can just use `&`. It's required.
5. `$CHAPTER` is used by the parser to explicitly indicate chapters: `Genesis 1:1, 5` vs. `Genesis 1:1, chapter 5`. If you don't have a value for this, I'd just use the English `chapter`. It's required.
6. `$FF` is used by the parser to indicate "and following", which it interprets as "to the end of the chapter" or "to the end of the book," depending on context. An exclamation mark (`!`) negates the following character or character class. For example, `f![a-z]` means an "f" not followed by the characters `a-z`. This syntax is passed onto the Peggy parser. If you don't have a value for this, I'd use the English `ff`. It's required.
7. `$NEXT` is used in languages where one pattern indicates the immediate next verse (or chapter). In Polish, for example, the `n![n]` pattern indicates a single `n`, not followed by another `n`; the `nn` is used for `$FF` to indicate an unknown number of following verses. It's optional.
8. `$TITLE` is used by the parser for psalm titles (like `Psalm 3 title`). It's required.
9. `$TRANS` defines custom translation names for your language. Each value has up to three components, separated by commas. Let's break down the (fictional) definition `NAS,NASB,kjv`. The `NAS` means to track `NAS` in what you're parsing, and to treat it as a known translation. The `NASB` means that when you get the value back to your script from the parser, you'll receive `NASB` instead of `NAS`. The `kjv` means to use the KJV versification system for this translation rather than the default. Often you'll omit parts: `NAS,,kjv` and `NAS,NASB` (this last one is the actual definition) both work. You should specify at least one translation for your language.
10. `$TO` is used by the parser to identify ranges. Typically you want to use words here, but you can also use characters. If you don't have a value, I recommend using `-` (which treats the hyphen as a range). It's required.
11. `$VERSE` is used by the parser to identify the words for `verse`. For example, `Philemon verse 2`. It's required.
12. `$COLLAPSE_COMBINING_CHARACTERS` is used by the language generator to determine whether to unbundle accents. For example, `á` gets separated out into `[áa]`. If you don't want this behavior for your language, set this value to `false`. You can also handle this scenario on a case-by-case basis using backticks, as described below.
13. `$PRE_BOOK_ALLOWED_CHARACTERS` is used by the language generator to identify allowed characters before books. See `zh` for an example (`[^\x1f]`, which means anything except another book character). In general, you don't need to set this variable.
14. `$UNICODE_BLOCK` is used by the language generator to identify appropriate boundary characters. It's required. If you're not sure, I recommend setting it to `Latin`.
3. Lines that start with an OSIS book name are a tab-separated series of regular expression subsets.
1. A backtick (`\``) following an accented character means not to allow the unaccented version of that character.
2. A `?` makes the preceding character optional.
3. A character class works like a simple RegExp character class: `[az]` means the characters `a` and `z` are allowed. Character ranges probably don't work.
2. You can use the variables you defined earlier in the file in definitions. If you'd like the build process to create tests for book ranges like `1-3 John`, then be sure to define `$FIRST`, `$SECOND`, and `$THIRD` as variables in at least one definition for `1John`, `2John`, and `3John`.
4. Lines that start with `=` are the order in which to check the regular expressions for books (check for `3 John` before `John`, for example, so that the string `3 John 2` doesn't get parsed as `John 2`).
5. Lines that start with `*` are the preferred long and short names for each OSIS (not used here, but potentially used in a Bible application). The third column represents a still-shorter form, and the fourth column represents the form to use when a single Psalm is being looked at. In English, we'd say "Psalms 1-2" but "Psalm 1."
You can also create three other files:
1. `translation_additions.js` (see `en` for an example). This file lets you define translations with different book orders or numbers of verses in each chapter. The top-level keys are the versification system (the third component of your `$TRANS` definition). Each object can have an `order` object, a `chapters` object, or both. `order` is a fully specified canon. It must include all 84 books that this parser understands, or else the parser could break. If your canon doesn't include all 84 books, you can put the unused ones at the end. `chapters` is an object where each key is the OSIS book identifier, and the value is an array of numbers that represent the number of verses in each chapter. For example, `"Gen": [31, 25, ...]` indicates that Genesis 1 has 31 verses, and Genesis 2 has 25 verses. You must specify values for all chapters in a book, though the number of chapters can vary depending on the translation. For `Ps151`, use a `Ps151` key; don't create a `Ps` array with 151 items; the parser will eat the last item.
2. `spec_additions.js` (see `he` for an example). This file consists of Jasmine tests that you'd like to run for your language in addition to the standard parsing tests.
3. `psalm_cb.js` (see `en` for an example). This file lets you specify how to handle inverted Psalm queries, like `23rd Psalm`. If you use the English file as a model, I'd only edit the `regexp` key; the other keys should remain as they are.
#### Building the Language
Run either `npm run build-language fr` (where `fr` is the ISO code of your language, matching the folder name in `src`) or `sh bin/build_lang.sh fr`. It also runs tests for that language.
To build a language, you'll need Perl with the JSON package and the dev dependencies. It uses `npx` to execute some functions.
#### Running Tests
If you have dev dependencies installed, the easiest way to run all tests is to use `npm run test`, which takes about a minute. You can also use `npx jasmine test/*.spec.* test/lang/*.spec.js`.
To run tests just for your language: `npx jasmine test/lang/de.spec.js` (replacing `de` with your language, unless your language is German, in which case `de` will work just fine for you).
There are also html files in `test/html` for browser testing. Because they use ES Modules, they won't run locally. ESBuild's [Serve functionality](https://esbuild.github.io/api/#serve) is your friend here.
## Purpose
This is the fourth complete Bible reference parser that I've written. It's how I try out new programming languages: the first one was in PHP (2002), which [saw production usage](https://web.archive.org/web/20100616201608/http://www.gnpcb.org/esv/share/about/) on the ESV Bible website from 2002-2011; the second in Perl (2007), which saw production usage on openbible.info starting in 2007; and the third in Ruby (2009), which never saw production usage because it was way too slow. This parser (at least on Node) is faster than the Perl one and 100 times faster than the Ruby one.
I originally chose Coffeescript out of curiosity—does it make Javascript that much more pleasant to work with? From a programming perspective, the easy loops and array comprehensions alone practically justify its use. From a readability perspective, the code is easier to follow (and come back to months later) than the equivalent Javascript—the tests, in particular, are much easier to read without all the Javascript punctuation.
However, the world has moved on since the original version of this parser in 2011, and Typescript is now standard in many workflows. Javascript has also adopted the easier loops, array comprehensions, and coalescing operators that made Coffeescript attractive. I used ChatGPT to take a first pass at rewriting each Coffeescript function into Typescript.
## License
The code in this project is licensed under the standard MIT License.
## Backlog Items
Here are improvements I have in mind for this parser.
1. Fully migrate language source files (but not the existing language files in `esm` so that you don't need to change any existing code) out of this repo. Target release: 4.0.0 (July 2026).
2. Improve type usage. This is my first experience with Typescript, and I'm confident a lot can be improved.
## Changelog
January 31, 2026 (3.2.0). Full release on Github and npm.
January 28, 2026 (3.2.0-beta).
* This release deprecates the language data files in `src/`. They're rebuilt in (hopefully) more-understandable yaml files in a [new repo](https://github.com/openbibleinfo/Bible-Passage-Reference-Parser-Languages). This change allows the core parser and language files to evolve independently and provides a sounder, less "magical" technical foundation for the languages. The 4.0.0 release will remove the source language files and related build code. To preserve backwards compatibility, there will be no changes to the languages in `esm/` or `js/`. But the language files in `esm/` will be replaced by newly built language files from the new repo. Most importantly, you don't need to change any existing code. If you want to try the new repo, you can clone it and `import` the language file that you want from there.
* Fixed `add_books()` custom patterns so that `osis_and_indices()` works without requiring capture groups. (Thanks to [emmaus-zam](https://github.com/emmaus-zam) for reporting this. Closes #70.)
July 31, 2025 (3.1.0). Full release on Github and npm. Ensured `add_translations()` reflects the current `case_sensitive` option.
July 29, 2025 (3.1.0-beta). Added `bp` and `bcp` for `osis_compaction_strategy` so that you can work at the level of precision that's right for your application. Fixed a bug in `grammar` options that wouldn't always respect `c_sep` values.
July 27, 2025 (3.1.0-alpha). A single peggy grammar now works for all languages, significantly reducing duplicate code. (Consequently, individual languages now export a `grammar_options` variable instead of a `grammar` variable, though that's an implementation detail you shouldn't need to care about.) Added `bcvp` as an `osis_compaction_strategy` to allow parsing output for "John 3:17a" to be structured as "John.3.17!a", following the OSIS spec. (Thanks to [hennessyevan](https://github.com/hennessyevan) for the suggestion.) Added a runtime `grammar` key in `options` to override language-specific parsing features. (Thanks to [renehamburger](https://github.com/renehamburger) for the suggestion.) Added `translations` and `books,translations` as values for `case_sensitive`. Updated dev dependencies to their latest versions.
January 11, 2025 (3.0.0). Full release on Github and npm.
January 9, 2025 (3.0.0-beta2).
* Renamed `/es` to `/esm` to avoid confusion with "es" language.
* Renamed `.cjs` files in `/cjs` to `.js` and added package.json in relevant subfolders to default to treating them as CommonJS files.
* Changed the logic for `non_latin_digits_strategy: "replace"`. It now runs after book parsing instead of before. This change reduces the number of book names created in certain languages, avoids unexpected behavior while reading strings, and makes upcoming changes to the build process easier to implement. Because it's technically a backwards-incompatible change (although in reality there probably aren't practical implications to it), I wanted to get it in as part of the 3.0 release.
January 5, 2025 (3.0-beta). Renamed `add_passage_patterns()` to `add_books()` to better reflect what it does and to match `add_translations()`. The function signature also changed: it's a breaking change from 3.0-alpha. Added `add_translations()` to allow adding new translations at runtime.
January 1, 2025 (3.0-alpha). This release represents a major refactoring from Coffeescript into Typescript, so I want it to settle a bit before publishing it to npm.
The existing `js` folder wasn't touched, and the public API (as described above) hasn't changed in any backwards-incompatible way for CommonJS files. ES modules require a language argument to be passed to the constructor (see [usage](#usage) above).
* Replaced Coffeescript with Typescript. Notably, ES2022 (Node 16 and circa-2021 web browsers) is now the minimum supported version for new files (though the `js` folder still contains files generated for 2.0.1). It should be possible to use `esbuild --target=es2018` if you need something older, but this target isn't explicitly supported. Any older targets won't work. Backwards compatibility for older targets isn't a design goal for this release.
* Added an `es` folder (changed to `esm` in 3.0.0-beta2) to support `import`-style modules usage rather than CommonJS `require` modules. This change also separates the language data from the core parser; you now send language data to the parser object when you construct it.
* Added a `cjs` folder with `.cjs` files (changed to `.js` in 3.0.0-beta2) to support browsers and legacy `require` usage.
* Added an `add_passage_patterns()` (renamed to `add_books()` in 3.0-beta) function to let you add new book patterns at run time instead of at compile time.
* Added the `testaments` option.
* Added a `warn_level` option to show warnings in some cases.
* Added support for newer English translations like CSB and NRSVUE. Thanks to [dwo0](https://github.com/dwo0) for one correction here.
* Added support for Farsi (thanks to [ralaska](https://github.com/ralaska)) and Indonesian.
* Fixed a crashing bug when calling `.parsed_entities()` multiple times consecutively in specific cases.
* Switched from PEG.js to [Peggy](https://github.com/peggyjs/peggy) and from regexgen to [grex](https://github.com/pemistahl/grex) since both are maintained.
* Updated Jasmine to the latest version (5.5.0).
* Added [esbuild](https://esbuild.github.io/) to build different module styles.
May 4, 2017 (2.0.1). Fixed a bug in calculating positions for non-English Psalm titles. Switched to regexgen from frak for more deterministic regular expressions to reduce diff sizes. Added support for Turkish (thanks to [alerque](https://github.com/alerque)).
May 1, 2016 (2.0.0). Added additional Vulgate versification beyond Psalms. Because these changes are technically backwards-incompatible, the major version number is incrementing, but in practice the changes are minor.
November 1, 2015 (1.0.0). Added `punctuation_strategy` option to replace the "eu"-style files that were previously necessary for this functionality. Added `single_chapter_1_strategy` option to allow parsing of "Jude 1" as `Jude.1.1` rather than `Jude.1`. Fixed crashing bug related to dissociated chapter/book ranges. Upgraded to the latest versions of pegjs and Coffeescript. Added npm compatibility. Added support for a "next verse" syntax, which is used in Polish ("n" for next verse, compared to "nn" for "and following"). The parsing grammar includes this support only when the $NEXT variable is set in the language's data.txt file (only Polish for now). Thanks to [nirski](https://github.com/openbibleinfo/Bible-Passage-Reference-Parser/issues/16) for identifying this limitation.
May 4, 2015 (0.10.0). Hand-tuned some of the PEG.js output to improve overall performance by around 50% in most languages.
March 16, 2015 (0.9.0). Added [`parse_with_context()`](#parse_with_context) to let you supply a context for a given string. Added Welsh. Fixed some Somali book names. Added missing punctuation from abbreviations in some languages. Reduced size of "eu" files by omitting needless duplicate code. Improved testing code coverage and added a [fuzz tester](https://github.com/openbibleinfo/Bible-Passage-Reference-Parser/blob/master/bin/fuzz/fuzz_lang.coffee), which uncovered several crashing bugs.
November 3, 2014 (0.8.0). Fixed two bugs related to range rewriting. Updated frak to the latest development version. Added quite a few more languages, bringing the total to 46.
May 2, 2014 (0.7.0). Added the `passage_existence_strategy` option to relax how much validation the parser should do when given a possibly invalid reference. The extensive tests written for this feature uncovered a few other bugs. Added the `book_range_strategy` option to specify how to handle books when they appear in a range. Added [`translation_info()`](#translation_info). Fixed bug when changing versification systems several times and improved support for changing versification systems that rely on a different book order from the default. Updated PEG.js to 0.8.0. Added support for Arabic, Bulgarian, Russian, Thai, and Vietnamese.
November 8, 2013 (0.6.0). Recast English as just another language that uses the same build process as all the other languages. Fixed bug with parentheses in sequences. Made specs runnable using [jasmine-node](https://github.com/mhevery/jasmine-node). Optimized generated regular expressions for speed using [Frak](https://github.com/noprompt/frak). Added support for German, Greek, Italian, and Latin.
May 1, 2013 (0.5.0). Added option to allow case-sensitive book-name matching. Supported parsing `Ps151` as a book rather than a chapter for more-complete OSIS coverage. Added Japanese, Korean, and Chinese book names. Added an additional 90,000 real-world strings, sharing actual counts rather than orders of magnitude.
December 30, 2012 (0.4.0). Per request, added compile tools and Hebrew support.
November 20, 2012 (0.3.0). Improved support for parentheses. Added some alternate versification systems. Added French support. Removed `docs` folder because it was getting unwieldy; the source itself remains commented. Increased the number of real-world strings from 200,000 to 370,000.
May 16, 2012 (0.2.0). Added basic Spanish support. Fixed multiple capital-letter sequences. Upgraded PEG.js and Coffeescript to the latest versions. Deprecated support for IE6 and 7.
November 18, 2011 (0.1.0). First commit.
================================================
FILE: bin/01.add_lang.pl
================================================
# Build script: generates the language-specific parser source files
# (translations.ts, grammar_options.ts, regexps.ts) in src/<lang>/ from the
# language's data.txt definitions.
use strict;
use warnings;
use Data::Dumper;
use Unicode::Normalize;
use JSON;
use MIME::Base64;
# The single required argument is the ISO 639 language code (e.g., "fr").
my ($lang) = @ARGV;
die "The first argument should be a language iso code (e.g., \"fr\")" unless ($lang && $lang =~ /^\w+$/);
# Paths are relative to the bin/ directory this script runs from.
my $dir = '../src';
my $test_dir = '../test';
my $regexp_space = "\\s";
# Characters that may legitimately follow a matched book name: digits,
# whitespace, punctuation, fullwidth parens, dash variants, and the internal
# \x1e/\x1f placeholder bytes the parser uses for book tokens.
my $valid_characters = "[\\d\\s.:,;\\x1e\\x1f&\\(\\)\x{ff08}\x{ff09}\\[\\]\\/\"'\\*=~\\-\x{2013}\x{2014}]";
my $letters = '';
# Map of every OSIS book ID this parser understands to its testament code
# (o = Old Testament, n = New Testament, a = Apocrypha).
my %valid_osises = make_valid_osises(qw(Gen Exod Lev Num Deut Josh Judg Ruth 1Sam 2Sam 1Kgs 2Kgs 1Chr 2Chr Ezra Neh Esth Job Ps Prov Eccl Song Isa Jer Lam Ezek Dan Hos Joel Amos Obad Jonah Mic Nah Hab Zeph Hag Zech Mal Matt Mark Luke John Acts Rom 1Cor 2Cor Gal Eph Phil Col 1Thess 2Thess 1Tim 2Tim Titus Phlm Heb Jas 1Pet 2Pet 1John 2John 3John Jude Rev Tob Jdt GkEsth Wis Sir Bar PrAzar Sus Bel SgThree EpJer 1Macc 2Macc 3Macc 4Macc 1Esd 2Esd PrMan AddEsth AddDan));
# Global state shared by the subs below, populated from data.txt.
my %raw_abbrevs;
my %vars = get_vars();
my %abbrevs = get_abbrevs();
my @order = get_order();
my %all_abbrevs = make_tests();
# English translation_additions.js is the fallback when a language has none.
my $default_alternates_file = "$dir/en/translation_additions.js";
# Generate the output files. Note that make_regexps() calls
# make_translations() again internally.
make_grammar_options();
my @translation_regexps = make_translations();
make_regexps(\@translation_regexps);
# Generates src/<lang>/translations.ts from the core template by filling in
# the language's $TRANS definitions (translation abbreviation, OSIS name, and
# versification-system alias). Returns the list of translation abbreviations
# to match in text, for later use by make_regexps().
sub make_translations
{
my $out = get_file_contents("$dir/core/lang_translations.ts");
my (@regexps, @aliases);
foreach my $translation (@{$vars{'$TRANS'}})
{
# Each $TRANS entry is "ABBREV,OSIS,system"; the last two parts are optional.
my ($trans, $osis, $alias) = split /,/, $translation;
push @regexps, $trans;
# No alias entry needed when the abbreviation maps straight to the defaults.
next unless ($osis || $alias);
$alias = 'default' unless ($alias);
my $lc = lc $trans;
# Quote keys that aren't valid bare JS identifiers.
$lc = '"' . $lc . '"' if ($lc =~ /\W/);
my $string = "$lc: {";
$string .= " system: \"$alias\"";
$string .= ", osis: \"$osis\"" if ($osis);
# The comma is OK because `current` and `default` are always at the end.
push @aliases, "$string },";
}
my $alias = join "\x0a\t", @aliases;
# A language can supply a hand-written alias file that replaces the
# generated aliases entirely.
if (-f "$dir/$lang/translation_aliases.js")
{
$alias = get_file_contents("$dir/$lang/translation_aliases.js");
$out =~ s/\t+(\$TRANS_ALIAS)/$1/g;
}
# Use the language's translation_additions.js if present; otherwise fall
# back to the English defaults. Strip the surrounding ({ ... }) wrapper so
# the contents can be spliced into the template object literal.
my $alternate = get_file_contents($default_alternates_file);
$alternate = get_file_contents("$dir/$lang/translation_additions.js") if (-f "$dir/$lang/translation_additions.js");
$alternate =~ s!^\(?\{[\r\n]+!!;
$alternate =~ s!\}\)?[\r\n]*$!!;
$out =~ s!//\$TRANS_ALIAS!$alias!g;
$out =~ s!//\$TRANS_ALTERNATE!,\n$alternate!g;
my $lang_isos = to_json($vars{'$LANG_ISOS'});
$out =~ s/"\$LANG_ISOS"/$lang_isos/g;
open OUT, ">:utf8", "$dir/$lang/translations.ts";
print OUT $out;
close OUT;
# Any leftover $UPPERCASE placeholder means a substitution was missed.
if ($out =~ /(\$[A-Z_]+)/)
{
die "$1\nTranslations: Capital variable";
}
return @regexps;
}
# Generates src/<lang>/grammar_options.ts from the core template by replacing
# every $VARIABLE placeholder with its grammar-formatted value from %vars.
sub make_grammar_options
{
my $out = get_file_contents("$dir/core/lang_grammar_options.ts");
foreach my $key (sort keys %vars)
{
# Escape the leading "$" so the key can be used as a regexp.
my $safe_key = $key;
$safe_key =~ s/^\$/\\\$/;
$out =~ s/$safe_key(?!\w)/format_var('grammar_options', $key)/ge;
}
# $NEXT is optional per-language; replace any remaining occurrence with an
# unmatchable placeholder sequence.
$out =~ s/\$NEXT/\\x1f\\x1f\\x1f/g;
# Any leftover $UPPERCASE placeholder means a substitution was missed.
if ($out =~ /(\$[A-Z_]+)/)
{
die "$1\nGrammar regexps: Capital variable";
}
open OUT, ">:utf8", "$dir/$lang/grammar_options.ts";
print OUT $out;
close OUT;
}
# Generates src/<lang>/regexps.ts from the core template: compiles the
# translation-matching regexp, all per-book regexps, and the various
# pre/post-book character-class variables. The substitutions are
# order-dependent; later generic passes clean up what earlier ones leave.
sub make_regexps
{
my ($translation_regexps) = @_;
# NOTE(review): make_translations() was already called at the top level;
# this second call re-writes translations.ts — presumably harmless but
# redundant. Confirm before removing.
make_translations();
my $out = get_file_contents("$dir/core/lang_regexps.ts");
my $translation_regexp = make_book_regexp('translations', $translation_regexps, 1);
$out =~ s/\$TRANS_REGEXP/$translation_regexp/g;
# Remove the $NEXT-dependent line entirely when the language doesn't
# define $NEXT (only Polish does, per the Readme).
unless (defined $vars{'$NEXT'})
{
$out =~ s/\n.+\$NEXT.+\n/\n/;
die "Regexps: next" if ($out =~ /\$NEXT\b/);
}
# Compile regexps for the canonical book order, plus any comma-separated
# multi-book keys (e.g. "1John,2John") found in the raw abbreviations.
my @osises = @order;
foreach my $osis (sort keys %raw_abbrevs)
{
next unless ($osis =~ /,/);
my $temp = $osis;
$temp =~ s/,+$//;
push @osises, {osis => $osis, testament => get_testament($temp)};
}
my $book_regexps = make_regexp_set(@osises);
$out =~ s/\/\/\$BOOK_REGEXPS/$book_regexps/;
$out =~ s/\$VALID_CHARACTERS/$valid_characters/;
$out =~ s/\$PRE_PASSAGE_ALLOWED_CHARACTERS/join('|', @{$vars{'$PRE_PASSAGE_ALLOWED_CHARACTERS'}})/e;
my $pre = join '|', map { format_value('regexp', $_)} @{$vars{'$PRE_BOOK_ALLOWED_CHARACTERS'}};
$out =~ s/\$PRE_BOOK_ALLOWED_CHARACTERS/$pre/;
$pre = join '|', map { format_value('regexp', $_)} @{$vars{'$FULL_PRE_BOOK_ALLOWED_CHARACTERS'}};
$out =~ s/\$FULL_PRE_BOOK_ALLOWED_CHARACTERS/$pre/;
$pre = join '|', map { format_value('regexp', $_)} @{$vars{'$PRE_NUMBER_BOOK_ALLOWED_CHARACTERS'}};
$out =~ s/\$PRE_NUMBER_BOOK_ALLOWED_CHARACTERS/$pre/;
# NOTE(review): this $pre is computed but unused — the substitution below
# joins the raw values directly instead of the formatted ones. Confirm
# whether format_value was intended here.
$pre = join '|', map { format_value('regexp', $_)} @{$vars{'$FULL_PRE_NUMBER_BOOK_ALLOWED_CHARACTERS'}};
$out =~ s/\$FULL_PRE_NUMBER_BOOK_ALLOWED_CHARACTERS/join('|', @{$vars{'$FULL_PRE_NUMBER_BOOK_ALLOWED_CHARACTERS'}})/ge;
#die Dumper($vars{'$FULL_PRE_NUMBER_BOOK_ALLOWED_CHARACTERS'});
$pre = join '|', map { format_value('regexp', $_)} @{$vars{'$POST_BOOK_ALLOWED_CHARACTERS'}};
$out =~ s/\$POST_BOOK_ALLOWED_CHARACTERS/$pre/;
# Longest-first alternation of every keyword that can appear inside a
# passage reference (chapter/verse/range/sequence words).
my @passage_components;
foreach my $var ('$CHAPTER', '$NEXT', '$FF', '$TO', '$AND', '$VERSE')
{
push @passage_components, map { format_value('regexp', $_) } @{$vars{$var}} if (exists $vars{$var});
}
@passage_components = sort { length $b <=> length $a } @passage_components;
$out =~ s/\$PASSAGE_COMPONENTS/join(' | ', @passage_components)/e;
my $lang_isos = to_json($vars{'$LANG_ISOS'});
$out =~ s/"\$LANG_ISOS"/$lang_isos/g;
# NOTE(review): `$(?:AND|TO)` contains an unescaped `$` before `(`, so the
# pattern is an end-of-string anchor followed by AND|TO rather than the
# literal text "$AND"/"$TO" (compare the escaping in the loop below). As
# written this substitution can never match; $AND/$TO fall through to the
# generic 'regexp' formatting below. Confirm whether `\$(?:AND|TO)` was
# intended.
$out =~ s/($(?:AND|TO))/format_var('string_raw', $1)/ge;
foreach my $key (sort keys %vars)
{
my $safe_key = $key;
$safe_key =~ s/^\$/\\\$/;
# Backtick-prefixed variables use raw-string formatting; bare ones use
# regexp formatting.
$out =~ s/`$safe_key/"`" . format_var('string_raw', $key)/ge;
$out =~ s/$safe_key(?!\w)/format_var('regexp', $key)/ge;
}
# Collapse the template's multi-line `new RegExp(String.raw...)` blocks
# into plain /.../flags regexp literals, stripping whitespace and escaping
# slashes.
$out =~ s@new RegExp\(String\.raw`((?:.(?!\t\.replace))*?\t)`\.replace\(/\\s\+/g, ""\), "(\w+)"\)@
my ($in_string, $flags) = ($1, $2);
$in_string =~ s/\s+//g;
$in_string =~ s/\//\\\//g;
"/$in_string/$flags";
@ges;
open OUT, ">:utf8", "$dir/$lang/regexps.ts";
print OUT $out;
close OUT;
# Any leftover $UPPERCASE placeholder means a substitution was missed.
if ($out =~ /(\$[A-Z_]+)/)
{
die "$1\nRegexps: Capital variable";
}
}
# Builds the comma-joined series of book-regexp objects for regexps.ts. Takes
# a list of {osis, testament} hashrefs. If the language has a psalm_cb.js
# (inverted-Psalm handling like "23rd Psalm"), its contents are inserted
# immediately before the "Ps" entry.
sub make_regexp_set
{
my @out;
my $has_psalm_cb = 0;
foreach my $ref (@_)
{
my $osis = $ref->{osis};
if ($osis eq 'Ps' && !$has_psalm_cb && -f "$dir/$lang/psalm_cb.js")
{
my $out = get_file_contents("$dir/$lang/psalm_cb.js");
# Collapse the file's `new RegExp(String.raw...)` block into a plain
# /.../flags literal, as make_regexps() does for the main template.
$out =~ s@new RegExp\(String\.raw`((?:.(?!\t\)\\b`\.replace))*?.\t\)\\b)`\.replace\(/\\s\+/g, ""\), "(\w+)"\)@
my ($in_string, $flags) = ($1, $2);
$in_string =~ s/\s+//g;
$in_string =~ s/\//\\\//g;
"/$in_string/$flags";
@ges;
push @out, $out;
$has_psalm_cb = 1;
}
# Order abbreviations longest-first, measuring length without the regexp
# metacharacters []? so that e.g. "Gen[e]?" sorts by its literal length.
my %safes;
foreach my $abbrev (keys %{$raw_abbrevs{$osis}})
{
my $safe = $abbrev;
$safe =~ s/[\[\]\?]//g;
$safes{$abbrev} = length $safe;
}
push @out, make_regexp($osis, sort { $safes{$b} <=> $safes{$a} } keys %safes);
}
return join(",\x0a", @out);
}
# Builds one regexps.ts entry for an OSIS key (possibly comma-separated, e.g.
# "1John,2John" for book ranges): the osis array, testament metadata, and the
# compiled book-matching regexp with its pre/post boundary assertions.
# Also mutates the %vars $PRE_NUMBER_* / $FULL_* entries as a side effect,
# which make_regexps() later substitutes into the template.
sub make_regexp
{
my $osis = shift;
my (@out, @abbrevs);
# Normalize each raw abbreviation into regexp form.
foreach my $abbrev (@_)
{
# A literal space matches any run of whitespace.
$abbrev =~ s/ /$regexp_space*/g;
# A zero-width space (U+200B) becomes optional whitespace that also
# permits U+200B itself inside the whitespace class.
$abbrev =~ s/[\x{200b}]/my $temp = $regexp_space; $temp =~ s!\]$!\x{200b}]!; "$temp*"/ge;
$abbrev = handle_accents($abbrev);
# Expand embedded $VARIABLES, each allowing an optional trailing period.
$abbrev =~ s/(\$[A-Z]+)(?!\w)/format_var('regexp', $1) . "\\.?"/ge;
push @abbrevs, $abbrev;
}
my $book_regexp = make_book_regexp($osis, $all_abbrevs{$osis}, 1);
$osis =~ s/,+$//;
# "A,B" becomes the JSON array elements "A", "B".
my $osis_json = $osis;
$osis_json =~ s/,/", "/g;
push @out, "\t{\x0a\t\tosis: [\"$osis_json\"],\x0a\t\t";
my $testament = get_testament($osis);
push @out, "testament: \"$testament\",\x0a\t\t";
# Books spanning multiple testaments get a per-book breakdown.
if (length($testament) > 1) {
my $testament_books = make_testament_books($osis);
push @out, "testament_books: " . JSON->new->canonical->encode($testament_books) . ",\x0a\t\t";
}
my $pre = join '|', @{$vars{'$PRE_BOOK_ALLOWED_CHARACTERS'}};
my $before_pre_book = '(?:^|(?<=';
my $after_pre_book = '))';
$vars{'$FULL_PRE_BOOK_ALLOWED_CHARACTERS'} = ["$before_pre_book$pre$after_pre_book"];
# When the book name or its abbreviations contain digits, the preceding-
# character class must also exclude digits (so "Genesis 11John" or "2:31"
# doesn't trigger a numbered-book match).
if ($osis =~ /^[0-9]/ || join('|', @abbrevs) =~ /[0-9]/)
{
if ($pre eq '[^\\p{L}]')
{
# Also reject a "digit:" immediately before a digit (verse contexts).
$pre = '[^\\p{L}\\p{N}])(?<!\d:(?=\d)';
}
else
{
$pre = join '|', map { format_value('quote', $_)} @{$vars{'$PRE_BOOK_ALLOWED_CHARACTERS'}};
$pre = '\b' if ($pre eq "\\\\d|\\\\b");
$pre =~ s/\\+d\|?//;
$pre =~ s/^\|+//;
$pre =~ s/^\||\|\||\|$//; #remove leftover |
$pre =~ s/^\[\^/[^0-9/; #if it's a negated class, add \d
}
$vars{'$PRE_NUMBER_BOOK_ALLOWED_CHARACTERS'} = [$pre];
$vars{'$FULL_PRE_NUMBER_BOOK_ALLOWED_CHARACTERS'} = ["$before_pre_book$pre$after_pre_book"];
#print Dumper($vars{'$FULL_PRE_NUMBER_BOOK_ALLOWED_CHARACTERS'});
}
my $post = join '|', @{$vars{'$POST_BOOK_ALLOWED_CHARACTERS'}};
push @out, "regexp: /(?:^|(?<=$pre))(";
push @out, $book_regexp;
# If the compiled regexp contains hyphens outside character classes, make
# each one optional ("-?") so abbreviations match with or without it.
if ($out[-1] =~ /-/)
{
my $temp = $out[-1];
# First pass: blank out character classes so only free-standing hyphens
# remain; if none are left, no rewrite is needed.
$temp =~ s!\[[^\]]+?\]!###!g;
# This is basically duplicating code in format_var.
if ($temp =~ /-/)
{
# Second pass: walk the pattern character-by-character, tracking
# escapes and bracket state, appending "?" to hyphens outside classes.
$temp = '';
my $in_bracket = 0;
my @chars = split //, $out[-1];
while (@chars)
{
my $char = shift @chars;
if ($char eq '\\')
{
# Keep escape sequences intact.
$temp .= $char;
$temp .= shift(@chars);
}
elsif ($char eq '[')
{
die "[ inside bracket: $out[-1]" if ($in_bracket);
$in_bracket = 1;
$temp .= $char;
}
elsif ($char eq '-')
{
if ($in_bracket)
{
$temp .= $char
}
elsif ($chars[0] && $chars[0] ne '?')
{
# Free-standing hyphen not already optional: make it so.
$temp .= "$char?";
}
else
{
$temp .= $char;
}
}
elsif ($char eq ']')
{
$in_bracket = 0;
$temp .= $char;
}
else
{
$temp .= $char;
}
}
$out[-1] = $temp;
}
}
# Drop a stray bracket around a right-quote character in the post class.
$post =~ s!(\[[^[\[\]]+?)\[(\x{2019}')\]]!$1$2!;
$vars{'$FULL_POST_BOOK_ALLOWED_CHARACTERS'} = ["(?:(?=$post)|\$)"];
push @out, ")(?:(?=$post)|\$)/giu\x0a\t}";
return join("", @out);
}
# Given a comma-separated OSIS list (e.g. "Ezra,Neh"), return a hashref
# mapping each individual OSIS ID to its testament code.
sub make_testament_books
{
	my ($osis_list) = @_;
	my %testament_for;
	foreach my $book (split /,/, $osis_list)
	{
		$testament_for{$book} = get_testament($book);
	}
	return \%testament_for;
}
# Compiles a list of abbreviations into an optimized alternation by shelling
# out to node ./make_regexps.js (which wraps grex). The JSON payload is
# base64-encoded on the command line, or written to ./temp.txt when it would
# exceed the OS argument-length limit. The result is validated (and
# recursively repaired) before being returned.
sub make_book_regexp
{
my ($osis, $abbrevs, $recurse_level, $note) = @_;
#print " Regexping $osis..\n";
# Strip backslashes: grex works on literal strings, not regexp syntax.
map { s/\\//g; } @{$abbrevs};
#my @subsets = get_book_subsets($abbrevs);
# Subset splitting is currently disabled; process all abbrevs as one set.
my @subsets = ($abbrevs);
my @out;
my $i = 1;
foreach my $subset (@subsets)
{
next unless (@{$subset});
#print "Sub $i\n";
$i++;
#print Dumper($subset);
my $json = JSON->new->ascii(1)->encode($subset);
#print "$json\n";
my $base64 = encode_base64($json, "");
print "$osis " . length($base64) . "\n";
my $use_file = 0;
if (length $base64 > 128_000) #Ubuntu limitation
{
# Too long for a command-line argument; "<" tells make_regexps.js to
# read ./temp.txt instead.
$use_file = 1;
open TEMP, '>./temp.txt';
print TEMP $json;
close TEMP;
$base64 = '<';
}
my $regexp = `node ./make_regexps.js "$base64"`;
#print Dumper($regexp) if ($osis eq 'Acts');
unlink './temp.txt' if ($use_file);
$regexp = decode_json($regexp);
die "No regexp json object" unless (defined $regexp->{patterns});
my @patterns;
foreach my $pattern (@{$regexp->{patterns}})
{
$pattern = format_node_regexp_pattern($pattern);
push @patterns, $pattern;
}
my $pattern = join('|', @patterns);
# May recurse back into make_book_regexp() if the pattern fails to match
# some abbreviations.
$pattern = validate_node_regexp($osis, $pattern, $subset, $recurse_level);
push @out, $pattern;
}
validate_full_node_regexp($osis, join('|', @out), $abbrevs);
return join('|', @out);
}
# Final sanity check: warn (via Dumper to stdout) about any abbreviation that
# the combined pattern fails to consume from the test string "<abbrev> 1".
sub validate_full_node_regexp
{
	my ($osis, $pattern, $abbrevs) = @_;
	foreach my $abbrev (@{$abbrevs})
	{
		my $remainder = "$abbrev 1";
		$remainder =~ s/^(?:$pattern) //;
		next if ($remainder eq '1');
		print Dumper(" Not parseable ($abbrev): '$remainder'\n$pattern");
	}
}
# Partition a large abbreviation list (over 500 entries) into two groups:
# group 0 holds standalone names, group 1 holds names that also occur inside
# a longer name (bounded by whitespace or punctuation). Small lists are
# returned unchanged as a single group. (Currently unused by
# make_book_regexp, which processes everything as one set.)
sub get_book_subsets
{
	my @remaining = @{$_[0]};
	return ([@remaining]) unless (scalar @remaining > 500);
	my @groups = ([]);
	my %contained;
	# Longest first, so containers are seen before their substrings.
	@remaining = sort { length $b <=> length $a } @remaining;
	while (@remaining)
	{
		my $longest = shift @remaining;
		# Already identified as a substring of something longer; skip it.
		next if (exists $contained{$longest});
		foreach my $candidate (@remaining)
		{
			my $quoted = quotemeta $candidate;
			next unless ($longest =~ /(?:^|[\s\p{InPunctuation}\p{Punct}])$quoted(?:[\s\p{InPunctuation}\p{Punct}]|$)/i);
			$contained{$candidate}++;
		}
		push @{$groups[0]}, $longest;
	}
	$groups[1] = [sort { length $b <=> length $a } keys %contained] if (%contained);
	return @groups;
}
# Consolidate a list of hashrefs of abbreviations into arrays: two-key
# hashrefs are merged pairwise into a shared accumulator array (closed once
# it grows past 6 entries), while larger hashrefs each become their own
# array. Returns the list of arrayrefs.
sub consolidate_abbrevs
{
	my @result;
	my $pending = -1; # index in @result currently collecting two-key sets
	foreach my $ref (@_)
	{
		my @abbrev_keys = keys %{$ref};
		if (scalar(@abbrev_keys) != 2)
		{
			push @result, [@abbrev_keys];
			next;
		}
		if ($pending == -1)
		{
			# Start a new accumulator at the end of @result.
			$pending = scalar @result;
			push @result, [@abbrev_keys];
		}
		else
		{
			push @{$result[$pending]}, @abbrev_keys;
			# Close the accumulator once it holds more than 6 entries.
			$pending = -1 if (scalar @{$result[$pending]} > 6);
		}
	}
	return @result;
}
# Verifies that $pattern matches every abbreviation; if not, splits the
# abbreviations into matching/non-matching sets and recursively rebuilds a
# combined pattern (mutually recursive with make_book_regexp). After 10
# levels it falls back to splitting the abbreviations by length.
sub validate_node_regexp
{
my ($osis, $pattern, $abbrevs, $recurse_level, $note) = @_;
my ($oks, $not_oks) = check_regexp_pattern($osis, $pattern, $abbrevs);
my @oks = @{$oks};
my @not_oks = @{$not_oks};
# Everything matched: the pattern is good as-is.
return $pattern unless (@not_oks);
#print scalar(@not_oks) . " not oks:\n" . Dumper(\@not_oks) . "\n$pattern\n";
if ($recurse_level > 10)
{
# Too deep: last-resort strategy of grouping abbreviations by length.
print "Splitting $osis by length...\n";
if ($note && $note eq 'lengths')
{
# We already tried the length strategy once; give up.
die "'Lengths' didn't work; no pattern available for: $osis / " . Dumper(\@not_oks);
}
my %lengths = split_by_length(@{$abbrevs});
my @patterns;
foreach my $length (sort { $b <=> $a } keys %lengths)
{
# This can lead to an infinite loop if the pattern never matches.
push @patterns, make_book_regexp($osis, $lengths{$length}, 1, 'lengths');
}
return validate_node_regexp($osis, join('|', @patterns), $abbrevs, $recurse_level + 1, 'lengths');
}
print " Recurse ($osis): $recurse_level\n";# if ($recurse_level > 3);
#if ($note && $note eq 'final')
#{
# print Dumper(\@oks);
# print Dumper(\@not_oks);
# exit;
#}
#print Dumper($abbrevs);
#print Dumper(\@oks);
#print Dumper(\@not_oks);
# Rebuild separate patterns for the matching and non-matching sets, then
# order the alternation so the set with the shorter minimum-length
# abbreviation comes second (longest-match-first heuristic).
my $ok_pattern = make_book_regexp($osis, \@oks, $recurse_level + 1);
my $not_ok_pattern = make_book_regexp($osis, \@not_oks, $recurse_level + 1);
#print "Nop: $not_ok_pattern\n";
my ($shortest_ok) = sort { length $a <=> length $b } @oks;
my ($shortest_not_ok) = sort { length $a <=> length $b } @not_oks;
my $new_pattern = (length $shortest_ok > length $shortest_not_ok && $recurse_level < 10) ? "$ok_pattern|$not_ok_pattern" : "$not_ok_pattern|$ok_pattern";
$new_pattern = validate_node_regexp($osis, $new_pattern, $abbrevs, $recurse_level + 1, 'final');
#print Dumper($new_pattern);
return $new_pattern;
}
# Derive the testament string ("o", "n", "a", or a combination, always in
# that order) for a comma-joined OSIS list via the global %valid_osises
# table. Dies on an unknown book or an empty result.
sub get_testament
{
	my ($osis) = @_;
	$osis =~ s/,+$//;
	my %seen;
	for my $book (split /,/, $osis)
	{
		my $testament = exists $valid_osises{$book} ? $valid_osises{$book} : '';
		die "No valid testament for '$book'" unless ($testament);
		$seen{$testament} = 1;
		# Ps is both Old Testament and Apocrypha since Ps151 adjusts the
		# number of psalms in Ps.
		$seen{a} = 1 if ($book eq 'Ps');
	}
	my $out = join '', grep { exists $seen{$_} } qw(o n a);
	die "No testament for " . Dumper($osis) unless ($out);
	return $out;
}
# Bucket abbreviations by half their character length (integer division),
# returning a hash of bucket => arrayref of abbreviations.
sub split_by_length
{
	my %buckets;
	push @{$buckets{int(length($_) / 2)}}, $_ for @_;
	return %buckets;
}
# Probe $pattern against every abbreviation (with " 1" appended as a fake
# chapter number) and partition the list into abbreviations the pattern
# fully consumes and abbreviations it doesn't. Returns both as arrayrefs.
sub check_regexp_pattern
{
	my ($osis, $pattern, $abbrevs) = @_;
	my (@oks, @not_oks);
	for my $abbrev (@{$abbrevs})
	{
		my $probe = "$abbrev 1";
		# The match must end right before a valid trailing character.
		$probe =~ s/^(?:$pattern)(?=$valid_characters)//i;
		if ($probe eq ' 1')
		{
			push @oks, $abbrev;
		}
		else
		{
			#print "not ok=$probe\nnot ok abbrev=$abbrev\nnot ok pattern=$pattern\n";
			push @not_oks, $abbrev;
		}
	}
	return (\@oks, \@not_oks);
}
# Strip grex's ^...$ anchors from a node pattern and normalize it for the
# generated JavaScript regexps:
# - unescape `\-` (grex emits it, but it breaks JS /u regexps),
# - escape forward slashes (the JS regexp literal delimiter),
# - spaces become \s*, thin spaces (U+2009) become \s.
# Dies when the pattern isn't anchored as expected.
sub format_node_regexp_pattern
{
	my ($pattern) = @_;
	die "Unexpected regexp pattern: '$pattern'" unless ($pattern =~ /^\^/ && $pattern =~ /\$$/);
	$pattern =~ s/^\^//;
	$pattern =~ s/\$$//;
	# grex returns `-` as `\-`, which doesn't work with Javascript's /u regexp flag.
	$pattern =~ s/\\-/-/g;
	# Escape every unescaped `/`. The previous `([^\\])(/)` form missed a
	# slash at the start of the pattern and the second of two adjacent
	# slashes (the first match consumed the preceding character); a
	# lookbehind covers every occurrence.
	$pattern =~ s!(?<!\\)/!\\/!g;
	$pattern =~ s/ /\\s*/g;
	$pattern =~ s/\x{2009}/\\s/g;
	return $pattern;
}
# Format one literal value: stash it in the shared %vars table under the
# scratch key $TEMP_VALUE (which is deliberately left in place afterwards)
# and run it through format_var().
sub format_value
{
	my ($kind, $literal) = @_;
	$vars{'$TEMP_VALUE'} = [$literal];
	return format_var($kind, '$TEMP_VALUE');
}
# Format the values of a %vars entry for embedding in generated source.
# $type selects the escaping rules: regexp / quote / string_raw /
# grammar_options are all alternation-joined; quote additionally escapes
# backslashes and double quotes, string_raw escapes backticks, and
# grammar_options keeps trailing dots and skips the group wrapper.
# Dies on an unknown type.
sub format_var
{
my ($type, $var_name) = @_;
my @values = @{$vars{$var_name}};
if ($type eq 'regexp' || $type eq 'quote' || $type eq 'string_raw' || $type eq 'grammar_options')
{
# The s/// operators below edit @values in place via $_ aliasing.
map {
s/\.$// unless ($type eq 'grammar_options');
# "value!lookahead" becomes "value(?!lookahead)".
s/!(.+)$/(?!$1)/;
s/`/\\`/g if ($type eq 'string_raw');
s/\\/\\\\/g if ($type eq 'quote');
s/"/\\"/g if ($type eq 'quote');
} @values;
my $out = join('|', @values);
$out = handle_accents($out);
$out =~ s/ +/\\s+/g;
return $out if ($type eq 'grammar_options');
# Wrap multi-value alternations in a non-capturing group.
return (scalar @values > 1) ? '(?:' . $out . ')' : $out;
}
else
{
die "Unknown var type: $type / $var_name";
}
}
# Generate the spec file for the current language: one `describe` block per
# book (including comma-joined multi-book entries), plus shared "misc"
# tests; then fill in the lang_spec.js template and the HTML spec runner,
# and dump book_names.txt (OSIS -> abbreviation per line).
# Returns the accumulated OSIS -> abbreviations hash.
sub make_tests
{
my @out;
my @osises = @order;
my %all_abbrevs;
# Multi-book entries (e.g. "Ezra,Neh") are appended after the canonical order.
foreach my $osis (sort keys %abbrevs)
{
next unless ($osis =~ /,/);
push @osises, {osis => $osis, testament => get_testament($osis)};
}
foreach my $ref (@osises)
{
my $osis = $ref->{osis};
my @tests;
my ($first) = split /,/, $osis;
my $match = "$first\.1\.1";
foreach my $abbrev (sort_abbrevs_by_length(keys %{$abbrevs{$osis}}))
{
foreach my $expanded (expand_abbrev_vars($abbrev))
{
add_abbrev_to_all_abbrevs($osis, $expanded, \%all_abbrevs);
push @tests, "\t\texpect(p.parse(\"$expanded 1:1\").osis()).toEqual(\"$match\");";
}
# NOTE(review): @osises holds {osis, testament} hashrefs, so $alt_osis
# here is a reference; $abbrevs{$alt_osis} stringifies the ref and is
# always empty, meaning this ordering check never fires. It likely
# intended $alt_osis->{osis} -- confirm before relying on it.
foreach my $alt_osis (@osises)
{
next if ($osis eq $alt_osis);
foreach my $alt_abbrev (keys %{$abbrevs{$alt_osis}})
{
next unless (length $alt_abbrev >= length $abbrev);
my $q_abbrev = quotemeta $abbrev;
if ($alt_abbrev =~ /\b$q_abbrev\b/)
{
foreach my $check (@osises)
{
last if ($alt_osis eq $check); # if $alt_osis comes first, that's what we want
next unless ($osis eq $check); # we only care about $osis
print Dumper("$alt_osis should be before $osis in parsing order\n $alt_abbrev matches $abbrev");
}
}
}
}
}
push @out, "describe(\"Localized book $osis ($lang)\", () => {";
push @out, "\tlet p = {}";
push @out, "\tbeforeEach(() => {";
push @out, "\t\tp = new bcv_parser(lang);";
push @out, "\t\tp.set_options({ book_alone_strategy: \"ignore\", book_sequence_strategy: \"ignore\", osis_compaction_strategy: \"bc\", captive_end_digits_strategy: \"delete\", testaments: \"ona\" });";
push @out, "\t});"; # close beforeEach
push @out, "\tit(\"should handle book: $osis ($lang)\", () => {";
push @out, @tests;
push @out, add_non_latin_digit_tests($osis, @tests);
# Don't check for an empty string because books like EpJer will lead to Jer in language-specific ways.
# Uppercase variants are only generated for non-Apocrypha books.
if ($valid_osises{$first} ne 'a')
{
foreach my $abbrev (sort_abbrevs_by_length(keys %{$abbrevs{$osis}}))
{
foreach my $expanded (expand_abbrev_vars($abbrev))
{
$expanded = uc_normalize($expanded);
push @out, "\t\texpect(p.parse(\"$expanded 1:1\").osis()).toEqual(\"$match\");";
}
}
}
push @out, "\t});";
push @out, "});"; #close book describe
}
# Dump every accumulated abbreviation to book_names.txt for downstream use;
# thin spaces (U+2009) become plain spaces in the dump.
open OUT, '>:utf8', "$dir/$lang/book_names.txt";
foreach my $osis (sort keys %all_abbrevs)
{
my @osis_abbrevs = sort_abbrevs_by_length(keys %{$all_abbrevs{$osis}});
my $use_osis = $osis;
$use_osis =~ s/,+$//;
foreach my $abbrev (@osis_abbrevs)
{
my $use = $abbrev;
$use =~ s/\x{2009}/ /g;
print OUT "$use_osis\t$use\n";
}
$all_abbrevs{$osis} = \@osis_abbrevs;
}
close OUT;
my @misc_tests;
push @misc_tests, add_range_tests();
push @misc_tests, add_chapter_tests();
push @misc_tests, add_verse_tests();
push @misc_tests, add_sequence_tests();
push @misc_tests, add_title_tests();
push @misc_tests, add_ff_tests();
push @misc_tests, add_next_tests();
push @misc_tests, add_trans_tests();
push @misc_tests, add_book_range_tests();
push @misc_tests, add_boundary_tests();
# Fill in the spec template; any surviving $VARIABLE placeholder is fatal.
my $out = get_file_contents("$dir/core/lang_spec.js");
my $lang_isos = to_json($vars{'$LANG_ISOS'});
$out =~ s/\$LANG_ISOS/$lang_isos/g;
$out =~ s/\$LANG/$lang/g;
$out =~ s/\/\/\$BOOK_TESTS/join("\x0a", @out)/e;
$out =~ s/\/\/\$MISC_TESTS/join("\x0a", @misc_tests)/e;
if (-f "$dir/$lang/spec_additions.js") {
$out .= get_file_contents("$dir/$lang/spec_additions.js");
}
open OUT, ">:utf8", "$dir/$lang/spec.js";
print OUT $out;
close OUT;
if ($out =~ /(\$[A-Z]+)/)
{
die "$1\nTests: Capital variable";
}
# Generate the standalone HTML spec runner for this language.
$out = get_file_contents("$dir/core/lang_specrunner.html");
$out =~ s/\$LANG/$lang/g;
open OUT, ">:utf8", "$test_dir/html/$lang.html";
print OUT $out;
close OUT;
if ($out =~ /(\$[A-Z])/)
{
die "$1\nTests: Capital variable";
}
return %all_abbrevs;
}
# Sort abbreviations by descending length, breaking ties alphabetically.
# (Equivalent to the old bucket-by-length implementation: within one length
# the default string sort applied.)
sub sort_abbrevs_by_length
{
	return sort { length($b) <=> length($a) || $a cmp $b } @_;
}
# Register $abbrev for $osis in the shared lookup hashref. Dotted
# abbreviations (except the Cyrillic special case "И.Н") are expanded into
# every combination of keeping or dropping each period.
sub add_abbrev_to_all_abbrevs
{
	my ($osis, $abbrev, $all_abbrevs) = @_;
	if ($abbrev =~ /\./ && $abbrev ne "\x{418}.\x{41d}")
	{
		my @parts = split /\./, $abbrev;
		my @combos = (shift @parts);
		for my $part (@parts)
		{
			# Each existing combination forks: with the period and without.
			@combos = map { ("$_.$part", "$_$part") } @combos;
		}
		$all_abbrevs->{$osis}->{$_} = 1 for @combos;
	}
	else
	{
		$all_abbrevs->{$osis}->{$abbrev} = 1;
	}
}
# If any generated test line contains non-Latin digits (Arabic-Indic,
# Devanagari, Thai, fullwidth, etc.), return the lines with a prepended
# option line switching the parser to the "replace" digit strategy;
# otherwise return an empty list.
sub add_non_latin_digit_tests
{
	my ($osis, @lines) = @_;
	my $joined = join "\n", @lines;
	return () unless ($joined =~ /[\x{0660}-\x{0669}\x{06f0}-\x{06f9}\x{07c0}-\x{07c9}\x{0966}-\x{096f}\x{09e6}-\x{09ef}\x{0a66}-\x{0a6f}\x{0ae6}-\x{0aef}\x{0b66}-\x{0b6f}\x{0be6}-\x{0bef}\x{0c66}-\x{0c6f}\x{0ce6}-\x{0cef}\x{0d66}-\x{0d6f}\x{0e50}-\x{0e59}\x{0ed0}-\x{0ed9}\x{0f20}-\x{0f29}\x{1040}-\x{1049}\x{1090}-\x{1099}\x{17e0}-\x{17e9}\x{1810}-\x{1819}\x{1946}-\x{194f}\x{19d0}-\x{19d9}\x{1a80}-\x{1a89}\x{1a90}-\x{1a99}\x{1b50}-\x{1b59}\x{1bb0}-\x{1bb9}\x{1c40}-\x{1c49}\x{1c50}-\x{1c59}\x{a620}-\x{a629}\x{a8d0}-\x{a8d9}\x{a900}-\x{a909}\x{a9d0}-\x{a9d9}\x{aa50}-\x{aa59}\x{abf0}-\x{abf9}\x{ff10}-\x{ff19}]/);
	return ("\t\tp.set_options({ non_latin_digits_strategy: \"replace\" });", @lines);
}
# Generate spec lines exercising range ("to"-style) separators between
# verses and between chapters, in both original and uppercased forms.
sub add_range_tests
{
	my @lines = ("\tit(\"should handle ranges ($lang)\", () => {");
	for my $raw (@{$vars{'$TO'}})
	{
		for my $to (expand_abbrev(remove_exclamations(handle_accents($raw))))
		{
			my $uc_to = uc_normalize($to);
			push @lines,
				"\t\texpect(p.parse(\"Titus 1:1 $to 2\").osis()).toEqual(\"Titus.1.1-Titus.1.2\");",
				"\t\texpect(p.parse(\"Matt 1${to}2\").osis()).toEqual(\"Matt.1-Matt.2\");",
				"\t\texpect(p.parse(\"Phlm 2 $uc_to 3\").osis()).toEqual(\"Phlm.1.2-Phlm.1.3\");";
		}
	}
	push @lines, "\t});";
	return @lines;
}
# Generate spec lines exercising localized chapter markers, in both
# original and uppercased forms.
sub add_chapter_tests
{
	my @lines = ("\tit(\"should handle chapters ($lang)\", () => {");
	for my $raw (@{$vars{'$CHAPTER'}})
	{
		for my $chapter (expand_abbrev(remove_exclamations(handle_accents($raw))))
		{
			my $uc_chapter = uc_normalize($chapter);
			push @lines,
				"\t\texpect(p.parse(\"Titus 1:1, $chapter 2\").osis()).toEqual(\"Titus.1.1,Titus.2\");",
				"\t\texpect(p.parse(\"Matt 3:4 $uc_chapter 6\").osis()).toEqual(\"Matt.3.4,Matt.6\");";
		}
	}
	push @lines, "\t});";
	return @lines;
}
# Generate spec lines exercising localized verse markers, in both original
# and uppercased forms.
sub add_verse_tests
{
	my @lines = ("\tit(\"should handle verses ($lang)\", () => {");
	for my $raw (@{$vars{'$VERSE'}})
	{
		for my $verse (expand_abbrev(remove_exclamations(handle_accents($raw))))
		{
			my $uc_verse = uc_normalize($verse);
			push @lines,
				"\t\texpect(p.parse(\"Exod 1:1 $verse 3\").osis()).toEqual(\"Exod.1.1,Exod.1.3\");",
				"\t\texpect(p.parse(\"Phlm $uc_verse 6\").osis()).toEqual(\"Phlm.1.6\");";
		}
	}
	push @lines, "\t});";
	return @lines;
}
# Generate spec lines exercising localized "and"-style sequence joiners, in
# both original and uppercased forms.
sub add_sequence_tests
{
	my @lines = ("\tit(\"should handle 'and' ($lang)\", () => {");
	for my $raw (@{$vars{'$AND'}})
	{
		for my $and (expand_abbrev(remove_exclamations(handle_accents($raw))))
		{
			my $uc_and = uc_normalize($and);
			push @lines,
				"\t\texpect(p.parse(\"Exod 1:1 $and 3\").osis()).toEqual(\"Exod.1.1,Exod.1.3\");",
				"\t\texpect(p.parse(\"Phlm 2 $uc_and 6\").osis()).toEqual(\"Phlm.1.2,Phlm.1.6\");";
		}
	}
	push @lines, "\t});";
	return @lines;
}
# Generate spec lines exercising localized psalm-title markers, in both
# original and uppercased forms.
sub add_title_tests
{
	my @lines = ("\tit(\"should handle titles ($lang)\", () => {");
	for my $raw (@{$vars{'$TITLE'}})
	{
		for my $title (expand_abbrev(remove_exclamations(handle_accents($raw))))
		{
			my $probe = "Ps 3 $title, 4:2, 5:$title";
			push @lines,
				"\t\texpect(p.parse(\"$probe\").osis()).toEqual(\"Ps.3.1,Ps.4.2,Ps.5.1\");",
				"\t\texpect(p.parse(\"" . uc_normalize($probe) . "\").osis()).toEqual(\"Ps.3.1,Ps.4.2,Ps.5.1\");";
		}
	}
	push @lines, "\t});";
	return @lines;
}
# Generate spec lines for "ff" (and-following) suffixes. Italian needs
# case-sensitive book matching here, so the option is toggled around the
# block and the uppercase variants are skipped for it.
sub add_ff_tests
{
my @out;
push @out, "\tit(\"should handle 'ff' ($lang)\", () => {";
push @out, "\t\tp.set_options({ case_sensitive: \"books\" });" if ($lang eq 'it');
foreach my $abbrev (@{$vars{'$FF'}})
{
foreach my $ff (expand_abbrev(remove_exclamations(handle_accents($abbrev))))
{
push @out, "\t\texpect(p.parse(\"Rev 3$ff, 4:2$ff\").osis()).toEqual(\"Rev.3-Rev.22,Rev.4.2-Rev.4.11\");";
push @out, "\t\texpect(p.parse(\"" . uc_normalize("Rev 3 $ff, 4:2 $ff") . "\").osis()).toEqual(\"Rev.3-Rev.22,Rev.4.2-Rev.4.11\");" unless ($lang eq 'it');
}
}
# Restore the default case sensitivity for the Italian special case.
push @out, "\t\tp.set_options({ case_sensitive: \"none\" });" if ($lang eq 'it');
push @out, "\t});";
return @out;
}
# Generate spec lines for "next verse/chapter" suffixes; skipped entirely
# (empty list) when the language doesn't define $NEXT. As with "ff",
# Italian needs case-sensitive book matching and skips the uppercase line.
sub add_next_tests
{
return () unless (defined $vars{'$NEXT'});
my @out;
push @out, "\tit(\"should handle 'next' ($lang)\", () => {";
push @out, "\t\tp.set_options({ case_sensitive: \"books\" });" if ($lang eq 'it');
foreach my $abbrev (@{$vars{'$NEXT'}})
{
foreach my $next (expand_abbrev(remove_exclamations(handle_accents($abbrev))))
{
push @out, "\t\texpect(p.parse(\"Rev 3:1$next, 4:2$next\").osis()).toEqual(\"Rev.3.1-Rev.3.2,Rev.4.2-Rev.4.3\");";
push @out, "\t\texpect(p.parse(\"" . uc_normalize("Rev 3 $next, 4:2 $next") . "\").osis()).toEqual(\"Rev.3-Rev.4,Rev.4.2-Rev.4.3\");" unless ($lang eq 'it');
push @out, "\t\texpect(p.parse(\"Jude 1$next, 2$next\").osis()).toEqual(\"Jude.1.1-Jude.1.2,Jude.1.2-Jude.1.3\");";
# Chapter/book boundary behavior: "next" can roll into the following
# chapter but not past the end of the book.
push @out, "\t\texpect(p.parse(\"Gen 1:31$next\").osis()).toEqual(\"Gen.1.31-Gen.2.1\");";
push @out, "\t\texpect(p.parse(\"Gen 1:2-31$next\").osis()).toEqual(\"Gen.1.2-Gen.2.1\");";
push @out, "\t\texpect(p.parse(\"Gen 1:2$next-30\").osis()).toEqual(\"Gen.1.2-Gen.1.3,Gen.1.30\");";
push @out, "\t\texpect(p.parse(\"Gen 50$next, Gen 50:26$next\").osis()).toEqual(\"Gen.50,Gen.50.26\");";
push @out, "\t\texpect(p.parse(\"Gen 1:32$next, Gen 51$next\").osis()).toEqual(\"\");";
}
}
push @out, "\t\tp.set_options({ case_sensitive: \"none\" });" if ($lang eq 'it');
push @out, "\t});";
return @out;
}
# Generate spec lines for translation identifiers. $TRANS entries are
# either "abbrev" or "abbrev,osis" pairs; the OSIS name defaults to the
# abbreviation itself. Each entry is tested in parentheses and lowercased.
sub add_trans_tests
{
	my @lines = ("\tit(\"should handle translations ($lang)\", () => {");
	for my $raw (sort @{$vars{'$TRANS'}})
	{
		for my $entry (expand_abbrev(remove_exclamations(handle_accents($raw))))
		{
			my ($trans, $osis) = split /,/, $entry;
			$osis ||= $trans;
			my $lc_probe = lc "Lev 1 $trans";
			push @lines,
				"\t\texpect(p.parse(\"Lev 1 ($trans)\").osis_and_translations()).toEqual([[\"Lev.1\", \"$osis\"]]);",
				"\t\texpect(p.parse(\"$lc_probe\").osis_and_translations()).toEqual([[\"Lev.1\", \"$osis\"]]);";
		}
	}
	push @lines, "\t});";
	return @lines;
}
# Generate spec lines for whole-book ranges ("1 John to 3 John"), using the
# $FIRST-prefixed John abbreviation from %raw_abbrevs. Returns an empty
# list (bare `return;` in list context) with a warning when no suitable
# John abbreviation exists.
sub add_book_range_tests
{
my ($first) = expand_abbrev(handle_accents($vars{'$FIRST'}->[0]));
my ($third) = expand_abbrev(handle_accents($vars{'$THIRD'}->[0]));
#my ($and) = sort { length $b <=> length $a } keys %{$vars{'$AND'}};
#my ($to) = sort { length $b <=> length $a } keys %{$vars{'$TO'}};
# Find a 1John abbreviation that uses the $FIRST placeholder and strip the
# placeholder, leaving the bare book part (e.g. " John").
my $john = '';
foreach my $key (sort keys %{$raw_abbrevs{'1John'}})
{
next unless ($key =~ /^\$FIRST/);
$key =~ s/^\$FIRST(?!\w)//;
$john = $key;
last;
}
unless ($john)
{
print " Warning: no available John abbreviation for testing book ranges\n";
return;
}
my @out;
my @johns = expand_abbrev(handle_accents($john));
push @out, "\tit(\"should handle book ranges ($lang)\", () => {";
push @out, "\t\tp.set_options({ book_alone_strategy: \"full\", book_range_strategy: \"include\" });";
# %alreadys suppresses duplicate test lines when expansions overlap.
my %alreadys;
foreach my $abbrev (sort @johns)
{
foreach my $to_regex (@{$vars{'$TO'}})
{
foreach my $to (expand_abbrev(remove_exclamations(handle_accents($to_regex))))
{
next if (exists $alreadys{"$first $to $third $abbrev"});
push @out, "\t\texpect(p.parse(\"$first $to $third $abbrev\").osis()).toEqual(\"1John.1-3John.1\");";
$alreadys{"$first $to $third $abbrev"} = 1;
}
}
}
push @out, "\t});";
return @out;
}
# Generate spec lines checking that em-dashes (U+2014) and curly quotes
# (U+201C/U+201D) around a reference don't break parsing.
sub add_boundary_tests
{
	my @lines = (
		"\tit(\"should handle boundaries ($lang)\", () => {",
		"\t\tp.set_options({ book_alone_strategy: \"full\" });",
		"\t\texpect(p.parse(\"\\u2014Matt\\u2014\").osis()).toEqual(\"Matt.1-Matt.28\");",
		"\t\texpect(p.parse(\"\\u201cMatt 1:1\\u201d\").osis()).toEqual(\"Matt.1.1\");",
		"\t});",
	);
	return @lines;
}
# Parse $dir/$lang/data.txt and build the OSIS -> {abbreviation => 1} map.
# Lines are NFC-normalized (non-normalized input is logged to
# temp.corrections.txt); "*"-prefixed lines are literal and skip the
# PRE/POST_BOOK wrapping; bracketed/optional patterns are expanded into all
# of their concrete forms.
sub get_abbrevs
{
my %out;
open CORRECTIONS, ">:utf8", "temp.corrections.txt";
my $has_corrections = 0;
open FILE, "<:utf8", "$dir/$lang/data.txt";
while (<FILE>)
{
# Lint the raw line for whitespace problems before any processing.
print "Tab followed by space: $_\n" if (/\t\s/ && /^[^\*]/);
print "Space followed by tab/newline: $_\n" if (/\ [\t\n]/);
next unless (/^[\w\*]/);
print "Regex character in preferred: $_\n" if (/^\*/ && /[\[\?!]/);
next unless (/\t/);
s/[\r\n]+$//;
my $prev = $_;
$_ = NFC(NFD($_));
if ($_ ne $prev)
{
print "Non-normalized text\n";
$has_corrections = 1;
print CORRECTIONS "$_\n";
}
my $is_literal = (/^\*/) ? 1 : 0;
# Tag non-ASCII characters with a backtick so handle_accents() leaves
# literal lines untouched.
s/([\x80-\x{ffff}])/$1`/g if ($is_literal);
my ($osis, @abbrevs) = split /\t/;
$osis =~ s/^\*//;
is_valid_osis($osis);
# The OSIS itself counts as an abbreviation, unless disabled via
# $FORCE_OSIS_ABBREV or it's a comma-joined multi-book entry.
$out{$osis}->{$osis} = 1 unless ($osis =~ /,/ || (exists $vars{'$FORCE_OSIS_ABBREV'} && $vars{'$FORCE_OSIS_ABBREV'}->[0] eq 'false'));
foreach my $abbrev (@abbrevs)
{
next unless (length $abbrev);
unless ($is_literal)
{
$abbrev = $vars{'$PRE_BOOK'}->[0] . $abbrev if (exists $vars{'$PRE_BOOK'});
$abbrev .= $vars{'$POST_BOOK'}->[0] if (exists $vars{'$POST_BOOK'});
$raw_abbrevs{$osis}->{$abbrev} = 1;
}
$abbrev = handle_accents($abbrev);
my @alts = expand_abbrev_vars($abbrev);
# A surviving "$" means a placeholder failed to expand.
if (Dumper(\@alts) =~ /.\$/)
{
die "Alts:" . Dumper(\@alts);
}
foreach my $alt (@alts)
{
# Patterns with classes or optional markers expand to every variant.
if ($alt =~ /[\[\?]/)
{
#print Dumper("$osis / $abbrev");
foreach my $expanded (expand_abbrev($alt))
{
$out{$osis}->{$expanded} = 1;
}
}
else
{
#print " $osis abbrev already exists: " . Dumper($abbrev) if (exists $out{$osis}->{$abbrev} && !$is_literal && $abbrev ne $osis && $abbrev !~ /\$/);
$out{$osis}->{$alt} = 1;
}
}
}
close FILE;
close CORRECTIONS;
unlink "temp.corrections.txt" unless ($has_corrections);
return %out;
}
# Expand $VARIABLE placeholders (e.g. $FIRST, $TO) inside an abbreviation
# into every concrete value from %vars, recursing while placeholders
# remain. Ordinal variables ($FIRST..$FIFTH) whose value is a digit or a
# Roman numeral also produce a variant followed by a period ("1." as well
# as "1").
sub expand_abbrev_vars
{
my ($abbrev) = @_;
# Drop stray backslashes except those escaping regexp metacharacters.
$abbrev =~ s/\\(?![\(\)\[\]\|s])//g;
return ($abbrev) unless ($abbrev =~ /\$[A-Z]+/);
my ($var) = $abbrev =~ /(\$[A-Z]+)(?!\w)/;
my @out;
my $recurse = 0;
foreach my $value (@{$vars{$var}})
{
foreach my $val (expand_abbrev($value))
{
$val = handle_accents($val);
my $temp = $abbrev;
# Replace only the first placeholder; recursion below handles the rest.
$temp =~ s/\$[A-Z]+(?!\w)/$val/;
$recurse = 1 if ($temp =~ /\$/);
push @out, $temp;
if ($var =~ /^\$(?:FIRST|SECOND|THIRD|FOURTH|FIFTH)$/ && $val =~ /^\d|^[IV]+$/)
{
# Ordinal variant with a trailing period (unless one already follows).
my $temp2 = $abbrev;
my $safe = quotemeta $var;
$temp2 =~ s/$safe([^.]|$)/$val.$1/;
push @out, $temp2;
}
}
}
if ($recurse)
{
my @temps;
foreach my $abbrev (@out)
{
my @adds = expand_abbrev_vars($abbrev);
push @temps, @adds;
}
@out = @temps;
}
return @out;
}
# Read the book order ("=Osis" lines) from the language's data file,
# registering each OSIS as an abbreviation of itself in the global maps.
# Returns a list of {osis, testament} hashrefs in file order.
sub get_order
{
	my @order;
	open FILE, '<:utf8', "$dir/$lang/data.txt";
	while (my $line = <FILE>)
	{
		next unless ($line =~ /^=/);
		$line =~ s/[\r\n]+$//;
		$line = NFC(NFD($line));
		$line =~ s/^=//;
		is_valid_osis($line);
		push @order, {osis => $line, testament => $valid_osises{$line}};
		# Every canonical OSIS is also a valid abbreviation of itself.
		$abbrevs{$line}->{$line} = 1;
		$raw_abbrevs{$line}->{$line} = 1;
	}
	close FILE;
	return @order;
}
# Read all "$KEY<TAB>value..." lines from the language's data file into a
# hash of arrayrefs, then derive defaults: extra allowed characters widen
# the global $valid_characters class, and the pre/post-book and pre-passage
# character classes fall back to computed values when not set explicitly.
sub get_vars
{
my %out;
open FILE, '<:utf8', "$dir/$lang/data.txt";
while (<FILE>)
{
next unless (/^\$/);
s/[\r\n]+$//;
$_ = NFC(NFD($_));
my ($key, @values) = split /\t/;
die "No values for $key" unless (@values);
$out{$key} = [@values];
}
close FILE;
# Append any $ALLOWED_CHARACTERS entries not already in $valid_characters.
foreach my $char (@{$out{'$ALLOWED_CHARACTERS'}})
{
my $check = quotemeta $char;
$valid_characters =~ s/\]$/$char]/ unless ($valid_characters =~ /$check/);
}
# NOTE(review): the second argument ('') is unused by
# get_pre_book_characters(); presumably a leftover -- confirm.
$letters = get_pre_book_characters($out{'$UNICODE_BLOCK'}, '');
$out{'$PRE_BOOK_ALLOWED_CHARACTERS'} = ["[^\\p{L}]"] unless (exists $out{'$PRE_BOOK_ALLOWED_CHARACTERS'});
$out{'$POST_BOOK_ALLOWED_CHARACTERS'} = [$valid_characters] unless (exists $out{'$POST_BOOK_ALLOWED_CHARACTERS'});
$out{'$PRE_PASSAGE_ALLOWED_CHARACTERS'} = [get_pre_passage_characters($out{'$PRE_BOOK_ALLOWED_CHARACTERS'})] unless (exists $out{'$PRE_PASSAGE_ALLOWED_CHARACTERS'});
$out{'$LANG'} = [$lang];
$out{'$LANG_ISOS'} = [$lang] unless (exists $out{'$LANG_ISOS'});
return %out;
}
# Build the character class allowed immediately before a passage reference,
# derived from the pre-book pattern. Handles the three known input shapes
# and dies on anything else.
sub get_pre_passage_characters
{
	my $pattern = join '|', @{$_[0]};
	if ($pattern eq "[^\\p{L}]")
	{
		return "[^\\x1e\\x1f\\p{L}\\p{N}]";
	}
	if ($pattern =~ /^\[\^[^\]]+?\]$/)
	{
		# Strip accent markers and any letter/digit ranges, then re-anchor
		# the negated class with the control chars and ASCII alphanumerics.
		$pattern =~ s/`//g;
		$pattern =~ s/\\x1[ef]|0-9|\\d|A-Z|a-z//g;
		$pattern =~ s/\[\^/[^\\x1e\\x1f\\dA-Za-z/;
		return $pattern;
	}
	if ($pattern eq '\d|\b')
	{
		return '[^\w\x1f\x1e]';
	}
	die "Unknown pre_passage pattern: $pattern";
}
# Build a negated character class of every letter in the configured Unicode
# blocks; the parser uses it so a book name isn't preceded by a letter.
# Non-ASCII letters are tagged with a backtick for later accent handling.
sub get_pre_book_characters
{
	my ($unicodes_ref) = @_;
	die "No \$UNICODE_BLOCK is set" unless (ref $unicodes_ref);
	my @runs = get_letters(get_unicode_blocks($unicodes_ref));
	my $class = join '', map {
		my ($first, $last) = @{$_};
		($first eq $last) ? "$first" : "$first-$last";
	} @runs;
	$class =~ s/([\x80-\x{ffff}])/$1`/g;
	return "[^$class]";
}
# Read letters/letters.txt (ranges of letter code points, "\uXXXX-\uYYYY"
# lines with optional "#" comments) and intersect them with the requested
# block ranges (arrayrefs of [start, end] code points). Returns a list of
# [first_char, last_char] runs of consecutive letters. Dies if the data
# file can't be opened (previously a silent failure returning no letters).
sub get_letters
{
	my %letters;
	open FILE, 'letters/letters.txt' or die "Couldn't open letters/letters.txt: $!";
	while (<FILE>)
	{
		next unless (/^\\u/);
		s/[\r\n]+$//;
		s/\\u//g;
		s/\s*#.+$//; # trailing comment
		s/\s+//g;
		my ($start, $end) = split /-/;
		$end = $start unless ($end); # single code point
		($start, $end) = (hex($start), hex($end));
		foreach my $ref (@_)
		{
			my ($start_range, $end_range) = @{$ref};
			# Skip letter ranges that don't overlap this block at all.
			next unless ($end >= $start_range && $start <= $end_range);
			for my $i ($start..$end)
			{
				next unless ($i >= $start_range && $i <= $end_range);
				$letters{"$i"} = 1;
			}
		}
	}
	close FILE;
	# Collapse the sorted code points into consecutive [first, last] runs.
	my $prev = -2;
	my @out;
	foreach my $pos (sort { $a <=> $b } keys %letters)
	{
		if ($pos == $prev + 1)
		{
			$out[-1]->[1] = chr $pos; # extend the current run
		}
		else
		{
			push @out, [chr $pos, chr $pos]; # start a new run
		}
		$prev = $pos;
	}
	return @out;
}
# Read letters/blocks.txt ("Block_Name<TAB>\uXXXX-\uYYYY" lines) and return
# [start, end] code-point ranges for every block whose name matches one of
# the requested Unicode block names. Basic_Latin is always included. Dies
# if the data file can't be opened (previously a silent failure returning
# no blocks).
sub get_unicode_blocks
{
	my ($unicodes_ref) = @_;
	my $unicode = join '|', @{$unicodes_ref};
	$unicode .= '|Basic_Latin' unless ($unicode =~ /Basic_Latin/);
	my @out;
	open FILE, 'letters/blocks.txt' or die "Couldn't open letters/blocks.txt: $!";
	while (<FILE>)
	{
		next unless (/^\w/);
		s/[\r\n]+$//;
		my ($block, $range) = split /\t/;
		next unless ($block =~ /$unicode/);
		$range =~ s/\\u//g;
		my ($start, $end) = split /-/, $range;
		push @out, [hex $start, hex $end];
	}
	close FILE;
	return @out;
}
# Expand a pattern-style abbreviation into every literal string it can
# match. Supports [character classes] (members become alternatives, with
# accented characters expanded via handle_accent), (alternation|groups),
# `?` for optionality of the preceding token, top-level `|`, and backslash
# escapes. Plain strings with no metacharacters are returned unchanged.
sub expand_abbrev
{
	my ($abbrev) = @_;
	# Fast path: nothing to expand.
	return ($abbrev) unless ($abbrev =~ /[\[\(?\|\\]/);
	# Escape unescaped literal dots. This previously used `(<!\\)`, a typo
	# for the negative lookbehind `(?<!\\)`; as written it only matched a
	# literal "<!\" sequence, so unescaped dots were never protected.
	$abbrev =~ s/(?<!\\)\./\\./g;
	my @chars = split //, $abbrev;
	my @outs = ('');
	while (@chars)
	{
		my $char = shift @chars;
		my $is_optional = 0;
		my @nexts;
		if ($char eq '[')
		{
			# Character class: each member becomes an alternative.
			my @nexts;
			while (@chars)
			{
				my $next = shift @chars;
				if ($next eq ']')
				{
					last;
				}
				elsif ($next eq '\\')
				{
					# Drop the escape; the next char is taken literally.
					next;
				}
				else
				{
					# Accented members expand to accent/base pairs.
					my $accents = handle_accent($next);
					$accents =~ s/^\[|\]$//g;
					foreach my $accent (split //, $accents)
					{
						push @nexts, $accent;
					}
				}
			}
			($is_optional, @chars) = is_next_char_optional(@chars);
			push @nexts, '' if ($is_optional);
			my @temps;
			foreach my $out (@outs)
			{
				# %alreadys dedupes repeated class members.
				my %alreadys;
				foreach my $next (@nexts)
				{
					next if (exists $alreadys{$next});
					push @temps, "$out$next";
					$alreadys{$next} = 1;
				}
			}
			@outs = @temps;
		}
		elsif ($char eq '(')
		{
			# Parenthesized group: recursively expand its contents.
			my @nexts;
			while (@chars)
			{
				my $next = shift @chars;
				if (!@nexts && $next eq '?' && $chars[0] eq ':')
				{
					# Non-capturing syntax isn't supported in data files.
					die "'(?:' in parentheses; replace with just '('";
				}
				if ($next eq ')')
				{
					last;
				}
				elsif ($next eq '\\')
				{
					push @nexts, $next;
					push @nexts, shift(@chars);
				}
				else
				{
					push @nexts, $next;
				}
			}
			@nexts = expand_abbrev(join('', @nexts));
			($is_optional, @chars) = is_next_char_optional(@chars);
			push @nexts, '' if ($is_optional);
			my @temps;
			foreach my $out (@outs)
			{
				foreach my $next (@nexts)
				{
					push @temps, "$out$next";
				}
			}
			@outs = @temps;
		}
		elsif ($char eq '|')
		{
			# Top-level alternation: the rest is a separate branch.
			push @outs, expand_abbrev(join('', @chars));
			return @outs;
		}
		else
		{
			my @temps;
			# Just use the next character
			if ($char eq '\\')
			{
				$char = shift(@chars);
			}
			($is_optional, @chars) = is_next_char_optional(@chars);
			foreach my $out (@outs)
			{
				push @temps, "$out$char";
				push @temps, $out if ($is_optional);
			}
			@outs = @temps;
		}
	}
	# Any surviving bracket means the pattern was malformed.
	if (join('', @outs) =~ /[\[\]]/)
	{
		print "Unexpected char: ";
		print Dumper(\@outs);
		exit;
	}
	return @outs;
}
# Peek at the character list: if it starts with '?', consume it and report
# that the preceding token is optional. Returns (is_optional, remaining).
sub is_next_char_optional
{
	my @rest = @_;
	if (@rest && $rest[0] eq '?')
	{
		shift @rest;
		return (1, @rest);
	}
	return (0, @rest);
}
# Rewrite an abbreviation so accented characters also match their
# unaccented forms, apostrophes also match curly quotes, periods become
# optional, and backtick-tagged characters (from literal data lines) are
# passed through untouched. The substitutions at the end are
# order-dependent: backtick tags are consumed progressively and must be
# stripped last.
sub handle_accents
{
my ($text) = @_;
my @chars = split //, $text;
my @texts;
# $context tracks whether we're inside a [character class].
my $context = '';
while (@chars)
{
my $char = shift @chars;
if ($char =~ /^[\x80-\x{ffff}]$/)
{
# Don't turn it into a class later if it's already in one
if (@chars && $chars[0] eq '`')
{
push @texts, $char;
push @texts, shift @chars;
next;
}
$char = handle_accent($char);
# Inside a class, drop the brackets handle_accent() added.
$char =~ s/^\[|\]$//g if ($context eq '[');
}
elsif ($context eq '[' && $char eq "'")
{
# An apostrophe in a class also matches the right single quote; the
# backtick protects it from the quote substitution below.
push @texts, "\x{2019}'";
$char = '`';
}
elsif (@chars && $chars[0] eq '`')
{
# Backtick-tagged (literal) character: pass through untouched.
push @texts, $char;
push @texts, shift @chars;
next;
}
elsif ($char eq '[' && !(@texts && $texts[-1] eq '\\'))
{
$context = '[';
}
elsif ($char eq ']' && !(@texts && $texts[-1] eq '\\'))
{
$context = '';
}
push @texts, $char;
}
$text = join '', @texts;
#exit;
#$text =~ s/([\x80-\x{ffff}])(?!`)/handle_accent($1)/ge;
# Untagged apostrophes/stress marks also match curly-quote equivalents.
$text =~ s/'(?!`)/[\x{2019}']/g;
$text =~ s/\x{2c8}(?!`)/[\x{2c8}']/g unless (exists $vars{'$COLLAPSE_COMBINING_CHARACTERS'} && $vars{'$COLLAPSE_COMBINING_CHARACTERS'}->[0] eq 'false');
# Strip backtick tags from non-ASCII characters now that they're safe.
$text =~ s/([\x80-\x{ffff}])`/$1/g;
$text =~ s/[\x{2b9}\x{374}]/['\x{2019}\x{384}\x{374}\x{2b9}]/g;
$text =~ s/([\x{300}\x{370}]-)\['\x{2019}\x{384}\x{374}\x{2b9}\](\x{376})/$1\x{374}$2/;
#$text =~ s/\.$//;
# Untagged periods become optional; tagged periods become required.
$text =~ s/\.(?!`)/\\.?/g;
$text =~ s/\.`/\\./g;
# A tagged space becomes a thin space (U+2009), used as a \s marker later.
$text =~ s/ `/\x{2009}/g;
$text =~ s/'`/'/g;
return $text;
}
# Truncate the text at its first exclamation point (strips negative-
# lookahead annotations from data-file entries).
sub remove_exclamations
{
	my ($text) = @_;
	$text =~ s/!.*\z//s;
	return $text;
}
# For a single character, return a character class matching both the
# accented form and its unaccented base (e.g. "é" -> "[ée]"). Returns the
# character unchanged when accent collapsing is disabled, when stripping
# marks changes nothing, or when the base is empty/whitespace/digit-only.
sub handle_accent
{
	my ($char) = @_;
	return $char if (exists $vars{'$COLLAPSE_COMBINING_CHARACTERS'} && $vars{'$COLLAPSE_COMBINING_CHARACTERS'}->[0] eq 'false');
	my $base = NFD($char);
	$base =~ s/\pM//g; # strip combining marks
	$base = NFC($base);
	return "[$char$base]" if ($base ne $char && length($base) > 0 && $base =~ /[^\s\d]/);
	return $char;
}
# Die unless every comma-separated part of $osis is a known OSIS book.
sub is_valid_osis
{
	my ($osis) = @_;
	for my $part (split /,/, $osis)
	{
		exists $valid_osises{$part} or die "Invalid OSIS: $osis ($part)";
	}
}
# Map each OSIS (given in canonical order) to its testament: "o" until
# Matt appears, "n" from Matt until Tob, "a" from Tob onward.
sub make_valid_osises
{
	my %testament_of;
	my $current = 'o';
	for my $osis (@_)
	{
		$current = 'n' if ($osis eq 'Matt');
		$current = 'a' if ($osis eq 'Tob');
		$testament_of{$osis} = $current;
	}
	return %testament_of;
}
# Uppercase with Unicode awareness: decompose, uppercase, recompose.
sub uc_normalize
{
	return NFC(uc(NFD($_[0])));
}
# Slurp a UTF-8 file and return its contents; dies if it can't be opened.
sub get_file_contents
{
	my ($path) = @_;
	# A lexical filehandle avoids clobbering the global FILE handle that
	# other subs in this script reuse.
	open my $fh, "<:utf8", $path or die "Couldn't open $path: $!";
	my $out = do { local $/; <$fh> }; # slurp
	close $fh;
	# An empty file slurps to undef; preserve the old ''-returning behavior.
	$out = '' unless (defined $out);
	return $out;
}
================================================
FILE: bin/add_cross_lang.pl
================================================
use strict;
use warnings;
use Unicode::Normalize;
use utf8;
use Data::Dumper;
my $src_dir = '../src';
# Configuration for each cross-language parser build. Per build:
#   chars           - regexp a harvested abbreviation must consist of
#   data            - language whose data.txt supplies the file skeleton
#   order           - canonical parsing order for the combined build
#   exclude_langs   - languages whose abbreviations are skipped entirely
#   exclude_abbrevs - individual abbreviations dropped (too ambiguous)
#   post_abbrevs    - abbreviations re-added verbatim per book afterwards
my %ranges = (
full => {
chars => '.',
data => 'en',
order => [qw(Gen Exod Bel Phlm Lev 2Thess 1Thess 2Kgs 1Kgs EpJer Lam Num Sus Sir PrMan Acts Rev PrAzar SgThree 2Pet 1Pet Rom Song Prov Wis Joel GkEsth Jonah Nah 1John 2John 3John John Josh 1Esd 2Esd Isa 2Sam 1Sam 2Chr 1Chr Ezra Ruth Neh Esth Job Mal Matt Ps Eccl Ezek Hos Obad Hag Hab Mic Zech Zeph Luke Jer 2Cor 1Cor Gal Eph Col 2Tim 1Tim Deut Titus Heb Phil Dan Jude 2Macc 3Macc 4Macc 1Macc Judg Mark Jas Amos Tob Jdt Bar)],
exclude_langs => [qw(amf awa bba bqc bus chr ckb dop dug fue fuh hil hne hwc leb lg mkl mqb mvc mwv nds ny pck ppl qu soy tmz tr twi udu wa wol yo yom zap)],
exclude_abbrevs => [
'Im', #Lev
'И. Н', 'И.Н', #Josh
'Ri', 'Bir', #Judg
'1 K', '1. K', '1.K', '1K', 'I. K', 'I.K', 'IK', 'I. Ki', 'I Ki', '1 Цар', #1Kgs
'2 K', '2. K', '2.K', '2K', 'II. K', 'II.K', 'IIK', '2 Цар', #2Kgs
'Ca', 'En', 'Pi bel Chante a', 'Hoga', 'Sol', 'Ασ', 'பாடல்', 'Solomon', #Song
'Ai', 'La', #Lam
'Ad', #Obad
'Yun', #Jonah
'J', 'Iv', 'In', 'ИН', 'И Н', 'yo', #John
'Nas', 'At', #Acts
'R', #Rom
'Ti', #Titus
],
post_abbrevs => {
Lev => ["\x{5229}", 'Im'],
Josh => ["\x{66f8}"],
Judg => ['Bir'],
'1Kgs' => ['1 Ks', '1. Ks', 'I Ks', 'I. Ks', '1 Re', '1. Re', 'I. Ki', 'I Ki', '1 Цар'],
'2Kgs' => ['2 Ks', '2. Ks', 'II Ks', 'II. Ks', '2 Re', '2. Re', '2 Цар'],
Ezra => ["\x{62c9}"],
Job => ["\x{4f2f}"],
Song => ['Songs', 'Hoga', "\x{6b4c}", 'Sol', 'Ασ', 'பாடல்'],
Lam => ['La'],
Jonah => ['Yun'],
Mic => ['Mi'],
Matt => ["\x{592a}"],
John => ['Jan', "\x{7d04}", 'ИН', 'yo'],
Acts => ["\x{410}\x{43F}\x{43E}\x{441}\x{442}\x{43E}\x{43B}", 'At'],
Rom => ['R'],
Titus => ['Ti'],
Rev => ['Re'],
},
include_extra_abbrevs => 0,
},
# "ascii" restricts abbreviations to ASCII plus general punctuation.
ascii => {
chars => "[\x00-\x7f\x{2000}-\x{206F}]",
data => 'en',
order => [qw(Gen Exod Bel Phlm Lev 2Thess 1Thess 2Kgs 1Kgs EpJer Lam Num Sus Sir PrMan Acts Rev PrAzar SgThree 2Pet 1Pet Rom Song Prov Wis Joel Jonah Nah 1John 2John 3John John Josh Judg 1Esd 2Esd Isa 2Sam 1Sam 2Chr 1Chr Ezra Ruth Neh GkEsth Esth Job Mal Matt Ps Eccl Ezek Hos Obad Hag Hab Mic Zech Zeph Luke Jer 2Cor 1Cor Gal Eph Col 2Tim 1Tim Deut Titus Heb Phil Dan Jude 2Macc 3Macc 4Macc 1Macc Mark Jas Amos Tob Jdt Bar)],
exclude_langs => [qw(amf awa bba bqc bus chr ckb dop dug fue fuh hil hne hwc leb lg mkl mqb mvc mwv nds ny pck ppl qu soy tmz tr twi udu wa wol yo yom zap)],
exclude_abbrevs => [
'Im', #Lev
'Ri', 'Bir', #Judg
'1 K', '1. K', '1.K', '1K', 'I. K', 'I.K', 'IK', 'I. Ki', 'I Ki', #1Kgs
'2 K', '2. K', '2.K', '2K', 'II. K', 'II.K', 'IIK', #2Kgs
'Ca', 'En', 'Pi bel Chante a', 'Sol', #Song
'Ai', 'La', #Lam
'Ad', #Obad
'Yun', #Jonah
'J', 'Iv', 'In', 'yo', #John
'Nas', 'At', #Acts
'R', #Rom
'Ti', #Titus
],
post_abbrevs => {
Lev => ['Im'],
'1Kgs' => ['1 Ks', '1. Ks', 'I Ks', 'I. Ks', '1 Re', '1. Re', 'I. Ki', 'I Ki'],
'2Kgs' => ['2 Ks', '2. Ks', 'II Ks', 'II. Ks', '2 Re', '2. Re'],
Judg => ['Bir'],
Song => ['Songs', "\x{6b4c}", 'Sol'],
Lam => ['La'],
Jonah => ['Yun'],
Mic => ['Mi'],
John => ['Jan', 'yo'],
Acts => ['At'],
Rom => ['R'],
Titus => ['Ti'],
Rev => ['Re'],
},
include_extra_abbrevs => 0,
},
);
# When one abbreviation maps to several books across languages, the book
# from the earliest language in this list wins (see prioritize_lang).
my @lang_priorities = qw(en es de pt pl zh ru it hu cs uk tl hr sv sk amf tr sr id nl ceb yo);
unless ($ARGV[0] && exists $ranges{$ARGV[0]})
{
die "Need name as first argument: " . join(', ', sort keys %ranges);
}
# Main flow: harvest abbreviations from every language, group them by
# OSIS, filter them through the selected range config, and write the
# combined data.txt for the cross-language build.
my $name = $ARGV[0];
my $range = $ranges{$name};
my %abbrevs = get_abbrevs($range);
my ($langs, %abbrev_osis) = arrange_abbrevs_by_osis();
mkdir "$src_dir/$name" unless (-d "$src_dir/$name");
my %excludes = make_excludes($range);
my $order = make_order($range->{order});
my ($data, $used_langs) = make_valid_abbrevs($name, $range->{data}, $range->{chars}, $range->{post_abbrevs}, $excludes{exclude_langs}, $excludes{exclude_abbrevs}, $order);
$data .= $order;
# Replace the template's $UNICODE_BLOCK line with blocks from the
# languages that actually contributed abbreviations.
$data =~ s/(\$UNICODE_BLOCK\t)[^\n]+\n/$1 . make_lang_blocks($used_langs) . "\n"/e;
$data = NFC(NFD($data));
open OUT, '>:utf8', "$src_dir/$name/data.txt";
print OUT $data;
close OUT;
# Flatten a range config's exclude_langs / exclude_abbrevs lists into
# lookup hashes. Abbreviations supplied via post_abbrevs are excluded from
# the harvested set too, since they're re-added verbatim later.
sub make_excludes
{
	my ($range) = @_;
	my %out = (exclude_langs => {}, exclude_abbrevs => {});
	for my $key ('exclude_langs', 'exclude_abbrevs')
	{
		next unless (exists $range->{$key} && ref $range->{$key});
		$out{$key}->{$_} = 1 for @{$range->{$key}};
	}
	if (exists $range->{post_abbrevs})
	{
		for my $osis (keys %{$range->{post_abbrevs}})
		{
			$out{exclude_abbrevs}->{$_} = 1 for @{$range->{post_abbrevs}->{$osis}};
		}
	}
	return %out;
}
# Collect the $UNICODE_BLOCK entries from every contributing language's
# data file and return them tab-joined, sorted by how many abbreviations
# each language contributed (heaviest users first).
sub make_lang_blocks
{
my ($used_langs) = @_;
my %blocks;
foreach my $lang (keys %{$used_langs})
{
next unless (-f "$src_dir/$lang/data.txt");
open FILE, '<:utf8', "$src_dir/$lang/data.txt";
while (<FILE>)
{
if (/^\$UNICODE_BLOCK\t/)
{
chomp;
my ($key, @blocks) = split /\t/;
# Weight each block by the language's abbreviation count.
foreach my $block (@blocks)
{
$blocks{$block} += $used_langs->{$lang};
}
last;
}
}
close FILE;
}
my @out = sort { $blocks{$b} <=> $blocks{$a} } keys %blocks;
return join("\t", @out);
}
# Return the "=Osis" order block: either built from an explicit arrayref of
# OSIS names, or copied verbatim from the named language's data file.
sub make_order
{
	my ($lang) = @_;
	return '=' . join("\n=", @{$lang}) . "\n" if (ref $lang);
	my $out;
	open FILE, '<:utf8', "$src_dir/$lang/data.txt" or die "$!";
	while (my $line = <FILE>)
	{
		$out .= $line if ($line =~ /^=/);
	}
	close FILE;
	return $out;
}
# Build the combined data.txt contents for the cross-language parser: walk
# the skeleton language's data file, replacing each book line's
# abbreviations with the filtered cross-language set (get_matches), append
# stragglers and post_abbrevs, run the conflict check, and splice in the
# cross-language header variables. Returns (contents, used-languages ref).
sub make_valid_abbrevs
{
my ($name, $lang, $pattern, $post_abbrevs, $exclude_langs, $exclude_abbrevs, $order) = @_;
my (%data, %alreadys, @out);
$order = expand_order($order);
# Non-book lines accumulate into 'pre' until the first book line, 'post' after.
my $data_key = 'pre';
my %used_langs;
open FILE, '<:utf8', "$src_dir/$lang/data.txt" or die "$!";
while (<FILE>)
{
s/[\r\n]+$/\n/;
if (/^\w/)
{
$data_key = 'post';
my ($osis) = /^([\w,]+)\t/;
next unless (exists $abbrev_osis{$osis});
push @out, join("\t", $osis, get_matches($pattern, $abbrev_osis{$osis}, $exclude_langs, $exclude_abbrevs, \%used_langs));
$alreadys{$osis} = 1;
}
elsif (/^[#=]/)
{
# Handle sort order later.
}
elsif (/^\*/)
{
# Literal lines keep their abbreviations minus the excluded ones.
foreach my $abbrev (keys %{$exclude_abbrevs})
{
my $safe = quotemeta $abbrev;
s/\t$safe(?=[\t\n])//g;
}
s/\t+$//;
$data{$data_key} .= $_;
}
else
{
$data{$data_key} .= $_;
}
}
close FILE;
# Get stragglers like "Ezek,Ezra"
foreach my $osis (sort keys %abbrev_osis)
{
next if (exists $alreadys{$osis});
push @out, join("\t", $osis, get_matches($pattern, $abbrev_osis{$osis}, $exclude_langs, $exclude_abbrevs, \%used_langs));
}
if (%{$post_abbrevs})
{
foreach my $osis (sort keys %{$post_abbrevs})
{
push @out, join("\t", "$osis,", @{$post_abbrevs->{$osis}}); #the comma after $osis ensures that it gets treated as a different book
}
}
check_abbrevs($name, $order, @out);
# combining characters are already taken care of in each language's book_names.txt
my $out = $data{pre} . "\$DEFAULT_TRANS_LANG\t$lang\n\$COLLAPSE_COMBINING_CHARACTERS\tfalse\n\$FORCE_OSIS_ABBREV\tfalse\n\$LANG_ISOS\t" . join("\t", sort keys %used_langs) . "\n" . join("\n", @out) . "\n" . $data{post};
$out =~ s/\n{2,}/\n/g;
return ($out, \%used_langs);
}
# Turn an "=Osis\n=Osis..." order block back into an arrayref of OSIS names.
sub expand_order
{
	my ($order) = @_;
	$order =~ tr/=//d;
	$order =~ s/^\s+|\s+$//g;
	return [split /\n+/, $order];
}
# Detect abbreviations that conflict across books: for each book in parsing
# order, look for its abbreviations appearing (word- or space/hyphen-
# bounded) inside a longer abbreviation of a later book. Conflicts are
# logged to conflicts.txt and printed; duplicates across books are warned
# about while the index is built.
sub check_abbrevs
{
	my $name = shift;
	my $order = shift;
	my (%abbrevs, %all_abbrevs, %order);
	my @order = @{$order};
	foreach my $osis (@order)
	{
		$order{$osis} = 1;
	}
	# Fail loudly instead of silently losing the conflict log.
	open OUT, '>:utf8', "$src_dir/$name/conflicts.txt" or die "Couldn't open $src_dir/$name/conflicts.txt: $!";
	foreach my $line (@_)
	{
		my ($osis, @abbrevs) = split /\t/, $line;
		push @order, $osis unless (exists $order{$osis}); # stragglers go last
		foreach my $abbrev (@abbrevs)
		{
			print "Duplicate: $abbrev / $all_abbrevs{$abbrev} / $osis\n" if (exists $all_abbrevs{$abbrev});
			push @{$abbrevs{$osis}}, $abbrev;
			$all_abbrevs{$abbrev} = $osis;
		}
	}
	foreach my $osis (keys %abbrevs)
	{
		# Longest first so the inner comparison loop below can bail early.
		# (A `map { lc $_ }` in void context used to follow this line; `lc`
		# doesn't modify in place and its result was discarded, so the no-op
		# has been removed.)
		$abbrevs{$osis} = [sort { length $b <=> length $a } @{$abbrevs{$osis}}];
	}
	my $i = 1;
	while (scalar @order > 1)
	{
		my $osis = shift @order;
		print "$i. $osis\n";
		#print "$i / $count\n" if ($i % 1000 == 0);
		$i++;
		foreach my $abbrev (@{$abbrevs{$osis}})
		{
			my $safe_abbrev = quotemeta $abbrev;
			foreach my $compare_osis (@order)
			{
				foreach my $compare (@{$abbrevs{$compare_osis}})
				{
					last if (length $compare <= length $abbrev); #sorted by length, so there will never be a shorter one
					next unless ($compare =~ /(?:^|\b|[\s\-])$safe_abbrev(?:[\s\-]|\b|$)/);
					#next unless ($compare =~ /Plac/);
					print OUT "$osis\t$abbrev\n$compare_osis\t$compare\n\n" ;
					print Dumper("Conflict:\n $osis:\t$abbrev\n $compare_osis:\t$compare");
				}
			}
		}
	}
	close OUT;
}
# Filters one book's candidate abbreviations: keep those composed entirely of
# characters allowed by $pattern, not listed in $exclude_abbrevs, and used by
# at least one language not in $exclude_langs. Side effect: increments
# $used_langs->{$lang} for every contributing language. Reads the file-level
# global $langs (abbrev => arrayref of language codes).
# Returns the surviving abbreviations sorted longest-first.
sub get_matches
{
	my ($pattern, $abbrevs, $exclude_langs, $exclude_abbrevs, $used_langs) = @_;
	my @out;
	ABBREV: foreach my $abbrev (@{$abbrevs})
	{
		#print Dumper($abbrev) if ($abbrev =~ /^$pattern+$/);
		# Only abbreviations built entirely from allowed characters survive.
		next unless ($abbrev =~ /^$pattern+$/);
		next if (exists $exclude_abbrevs->{$abbrev});
		my $ok = 0;
		foreach my $lang (@{$langs->{$abbrev}})
		{
			next if (exists $exclude_langs->{$lang});
			# Record every (non-excluded) language that uses this abbreviation.
			$used_langs->{$lang}++;
			$ok = 1;
		}
		push @out, $abbrev if ($ok);
	}
	#die Dumper("No matches: $pattern") unless (@out);
	# Longest-first so longer alternatives take precedence downstream.
	@out = sort { length $b <=> length $a } @out;
	# Debugging aid left in place by the original author.
	print Dumper(\@out) if (Dumper(\@out) =~ /Mak/ && Dumper(\@out) =~ /Eccl/);
	return @out;
}
# Inverts the file-level global %abbrevs (abbrev => osis => lang => count)
# into osis => [abbrevs]. When one abbreviation maps to more than one book,
# prioritize_lang() decides which book keeps it. Also collects, per
# abbreviation, the language codes of the winning book.
# Returns (\%langs, %out): a hashref, then a flattened hash.
sub arrange_abbrevs_by_osis
{
	my (%out, %langs);
	foreach my $abbrev (keys %abbrevs)
	{
		my ($osis) = keys %{$abbrevs{$abbrev}};
		if (scalar(keys(%{$abbrevs{$abbrev}})) > 1)
		{
			# Ambiguous abbreviation: let language priority pick the book.
			$osis = prioritize_lang($abbrev, $abbrevs{$abbrev});
		}
		push @{$out{$osis}}, $abbrev;
		foreach my $lang (keys %{$abbrevs{$abbrev}->{$osis}})
		{
			if ($lang =~ /[^a-z]/)
			{
				# A language code with non-lowercase characters means corrupt
				# input; dump the offending data and stop the whole run.
				print Dumper($lang);
				print Dumper($abbrev);
				print Dumper($abbrevs{$abbrev});
				exit;
			}
			push @{$langs{$abbrev}}, $lang;
		}
	}
	return (\%langs, %out);
}
# Decides which OSIS id "wins" an ambiguous abbreviation: the first book
# (scanning languages in @lang_priorities order) whose language set contains
# the priority language. Returns '' (after dumping diagnostics) when no
# priority language matches.
sub prioritize_lang
{
	my ($abbrev, $ref) = @_;
	foreach my $lang (@lang_priorities)
	{
		my ($winner) = grep { exists $ref->{$_}->{$lang} } keys %{$ref};
		return $winner if (defined $winner);
	}
	print Dumper("No lang priority for: $abbrev") . Dumper($ref);
	return '';
}
# Scans every language directory under the file-level global $src_dir for a
# book_names.txt and merges their abbreviations into one hash of the shape
# abbrev => osis => lang => count. When $range->{include_extra_abbrevs} is
# set, also merges the shared "extra" file (which carries no language tag).
sub get_abbrevs
{
	my ($range) = @_;
	my %out;
	opendir SRC, $src_dir;
	while (my $lang = readdir SRC)
	{
		# Language dirs are short ISO codes; skip anything longer (e.g. "extra")
		# or containing non-word characters (".", "..").
		next if (length($lang) > 3);
		next unless ($lang =~ /^\w+$/);
		next unless (-f "$src_dir/$lang/book_names.txt");
		get_abbrevs_from_file("$src_dir/$lang/book_names.txt", \%out, $lang);
	}
	closedir SRC;
	# The "extra" file is merged without a language argument.
	get_abbrevs_from_file("$src_dir/extra/book_names.txt", \%out) if (-f "$src_dir/extra/book_names.txt" && $range->{include_extra_abbrevs});
	return %out;
}
# Parses one tab-delimited book_names.txt into $abbrevs
# (abbrev => osis => lang => count). Each line is "OSIS\tabbrev[\tlang...]":
# explicit language columns win; otherwise the file's own $lang is recorded
# (may be undef for the "extra" file). Lines starting with "#" are skipped.
sub get_abbrevs_from_file
{
	my ($file, $abbrevs, $lang) = @_;
	open FILE, '<:utf8', $file or die "$file: $!";
	while (<FILE>)
	{
		next if (/^#/);
		# Strip trailing whitespace (including the newline).
		s/\s+$//;
		#print "$lang: \\u200c\n" if (/\x{200c}/);
		my ($osis, $abbrev, @langs) = split /\t/;
		next unless ($osis);
		if (@langs)
		{
			# Per-line language overrides take precedence over the file's language.
			foreach my $l (@langs)
			{
				$abbrevs->{$abbrev}->{$osis}->{$l}++;
			}
		}
		else
		{
			$abbrevs->{$abbrev}->{$osis}->{$lang}++;
		}
	}
	close FILE;
}
================================================
FILE: bin/build_lang.sh
================================================
#!/usr/bin/bash
# Build one language's parser: generate the language files, bundle the ESM
# and CJS outputs, and run that language's spec.
# Usage: build_lang.sh <iso-code>
if [ -z "$1" ]; then
	echo "Please specify an ISO language code to continue, such as: sh build_lang.sh \"fr\""
	exit 1
fi
# Prepare the language files.
# Fix: "$1" is now quoted everywhere so an argument containing spaces or
# glob characters cannot word-split into multiple arguments.
perl 01.add_lang.pl "$1"
# Copy the main files.
cp ../src/core/bcv_matcher.ts ./build/
cp ../src/core/bcv_options.ts ./build/
cp ../src/core/bcv_parser.ts ./build/
cp ../src/core/bcv_passage.ts ./build/
cp ../src/core/bcv_regexps_manager.ts ./build/
cp ../src/core/bcv_translations_manager.ts ./build/
cp ../src/core/lang_bundle.ts ./build/
cp ../src/core/types.d.ts ./build/
# Generate the language-independent grammar file.
npx peggy --format es --plugin "../src/core/peg_plugin.js" -o "./build/bcv_grammar.js" "../src/core/bcv_grammar.pegjs"
# The perl script generated these language files.
mv "../src/$1/regexps.ts" ./build/bcv_regexps.ts
mv "../src/$1/translations.ts" ./build/bcv_translations.ts
mv "../src/$1/grammar_options.ts" ./build/bcv_grammar_options.ts
mv "../src/$1/spec.js" "../test/lang/$1.spec.js"
# Create the ES build files.
npx esbuild ./build/bcv_parser.ts --bundle --target=es2022 --charset=utf8 --format=esm --outfile=../esm/bcv_parser.js
npx esbuild ./build/lang_bundle.ts --bundle --target=es2022 --charset=utf8 --format=esm --outfile="../esm/lang/$1.js"
# Also create the typescript definitions, which are the same for every language.
cp ../src/core/lang.d.ts "../esm/lang/$1.d.ts"
# Now onto commonjs...
# Remove these exports so that we don't export them from inside the module. `export default` is removed later.
sed '/^export {$/,$d' ./build/bcv_grammar.js > ./build/temp_grammar.js
sed 's/^export default/const grammar_options =/' ./build/bcv_grammar_options.ts > ./build/temp_grammar_options.ts
# Concatenate all the non-imported files together.
cat ./build/bcv_regexps.ts ./build/bcv_translations.ts ./build/temp_grammar.js ./build/bcv_parser.ts ./build/temp_grammar_options.ts > ./build/temp_cjs_bundle.ts
# Remove the unnecessary exported classes from the `ts` modules.
sed "s/export default //g" ./build/temp_cjs_bundle.ts > ./build/cjs_bundle.ts
# Make sure the grammar object is available inside the module.
echo "var grammar = { parse: peg\$parse };" >> ./build/cjs_bundle.ts
# Now build the cjs module. It also works as a browser module thanks to the banner line.
npx esbuild ./build/cjs_bundle.ts --bundle --target=es2022 --charset=utf8 --format=cjs --banner:js='if (typeof module === "undefined") { var module = {}; }' --outfile="../cjs/$1_bcv_parser.js"
if [ "$1" = "en" ]; then
	# Create a minified file for historical compatibility with pre-v3.
	npx esbuild ./build/cjs_bundle.ts --bundle --minify --target=es2022 --charset=utf8 --format=cjs --banner:js='if (typeof module === "undefined") { var module = {}; }' --outfile="../cjs/$1_bcv_parser.min.js"
fi
# Uncomment this line to build a minified file in `esm`.
#npx esbuild ./build/bcv_parser.ts --bundle --minify --target=es2022 --charset=utf8 --format=esm --outfile=../esm/$1_bcv_parser.min.js
# Clean up build files.
rm ./build/*
# Run tests.
npx jasmine "../test/lang/$1.spec.js" --random=false
================================================
FILE: bin/fuzz_lang.js
================================================
"use strict";
const lang = "en";
import * as fs from "fs";
import { bcv_parser } from "../esm/bcv_parser.js";
const lang_data = await import(`../esm/lang/${lang}.js`);
const max_length = 100;
// Read the tab-delimited book_names.txt for `lang` and collect every
// abbreviation (second column) it defines.
function get_abbrevs(lang) {
	const text = fs.readFileSync(`../src/${lang}/book_names.txt`).toString();
	const abbrevs = [];
	for (const line of text.split("\n")) {
		const fields = line.split("\t");
		if (fields[1] != null) {
			abbrevs.push(fields[1]);
		}
	}
	return abbrevs;
}
// Fixed list of translation identifiers to fuzz with; `lang` is accepted for
// symmetry with get_abbrevs() but is not currently used.
function get_translations(lang) {
	const identifiers = "AMP ASV CEB CEV ERV ESV HCSB KJV MSG NAB NABRE NAS NASB NIRV NIV NKJV NLT NRSV RSV TNIV";
	return identifiers.split(" ");
}
// Scrape the "Options" section of the project Readme to learn every option
// name (`* \`name:\`` bullets) and its enumerated values (tab-indented
// `\`value\`` bullets). Two options whose values are not listed that way are
// filled in explicitly at the end.
function get_options() {
	const readme = fs.readFileSync("../Readme.md").toString();
	const out = {};
	let current = "";
	let in_options = false;
	for (const line of readme.split("\n")) {
		if (!in_options) {
			if (/^### Options/.test(line)) {
				in_options = true;
			} else {
				continue;
			}
		} else if (/^### /.test(line)) {
			// Next section header ends the scan.
			break;
		}
		const option_match = line.match(/^\* `(\w+):/);
		if (option_match) {
			current = option_match[1];
			out[current] = [];
			continue;
		}
		const value_match = line.match(/^\t\* `(\w+)`/);
		if (value_match) {
			out[current].push(value_match[1]);
		}
	}
	out.passage_existence_strategy = ["b", "bc", "bcv", "bv", "c", "cv", "v", "none"];
	out.include_apocrypha = [true, false];
	return out;
}
// Build a random option set: for each option name in `keys`, pick one of its
// allowed values from the module-level `options` table.
function create_options(keys) {
	const chosen = {};
	keys.forEach((option) => {
		chosen[option] = get_random_item_from_array(options[option]);
	});
	return chosen;
}
// Uniformly pick one element of `items` (assumes a non-empty array).
function get_random_item_from_array(items) {
	const index = Math.floor(Math.random() * items.length);
	return items[index];
}
// Assemble a random fuzz string of 1..max_length tokens, each followed half
// the time by a random space character.
function build_text(keys) {
	const pieces = [];
	const count = Math.ceil(Math.random() * max_length);
	for (let i = 0; i < count; i++) {
		let piece = make_token(get_random_item_from_array(keys));
		if (Math.random() >= 0.5) {
			piece += get_random_item_from_array(possibles.space);
		}
		pieces.push(piece);
	}
	return pieces.join("");
}
// Produce one random token of the given type from the `possibles` table.
// String-valued entries are templates expanded recursively; "char_*" entries
// hold arrays of character codes; everything else is a flat list of choices.
function make_token(type) {
	const possible = possibles[type];
	let token;
	if (typeof possible === "string") {
		token = build_nested_string(possible);
	} else if (type.startsWith("char_")) {
		// Fix: use startsWith instead of the deprecated String#substr.
		token = String.fromCharCode(get_random_item_from_array(possible));
	} else {
		token = get_random_item_from_array(possible);
	}
	// Half the time, wrap translation tokens in parentheses, e.g. "(NIV)".
	if (Math.random() >= 0.5 && type.match(/^translation/)) {
		token = `(${token})`;
	}
	return token;
}
// Expand every "$type" placeholder in a template by generating a token of
// that type, optionally followed by a random space character.
function build_nested_string(text) {
	return text.replace(/\$(\w+)/g, (_match, type) => {
		const expanded = make_token(type);
		return Math.random() >= 0.5
			? expanded + get_random_item_from_array(possibles.space)
			: expanded;
	});
}
// --- Fuzz driver ---------------------------------------------------------
// Generate random reference-like text from the token tables below, parse it,
// and abort on the first inconsistent result.
const bcv = new bcv_parser(lang_data);

// Inclusive integer range [lo, hi] as an array (replaces the original
// `(function(){...}).apply(this)` loops).
function int_range(lo, hi) {
	return Array.from({ length: hi - lo + 1 }, (_, i) => lo + i);
}

// Token tables: arrays are literal choices, strings are templates expanded by
// build_nested_string(), and "char_*" entries hold character codes.
const possibles = {
	book: get_abbrevs(lang),
	translation: get_translations(lang),
	number: int_range(0, 1100),
	chapter: int_range(0, 152),
	verse: int_range(0, 177),
	cv_sep: [":", ".", "\"", "'", " "],
	range_sep: ["-", "\u2013", "\u2014", "through", "thru", "to"],
	sequence_sep: [",", ";", "/", ":", "&", "-", "\u2013", "\u2014", "~", "and", "compare", "cf", "cf.", "see also", "also", "see", " "],
	title: ["title"],
	in_book_of: ["from the book of", "of the book of", "in the book of"],
	c_explicit: ["chapters", "chapter", "chapts", "chapts.", "chpts", "chpts.", "chapt", "chapt.", "chaps", "chaps.", "chap", "chap.", "chp", "chp.", "chs", "chs.", "cha", "cha.", "ch", "ch."],
	v_explicit: ["verses", "verse", "ver", "ver.", "vss", "vss.", "vs", "vs.", "vv", "vv.", "v", "v."],
	v_letter: ["a", "b", "c", "d", "e"],
	ff: ["ff", "ff."],
	ordinal: ["th", "nd", "st"],
	space: [" ", "\t", "\n", "\u00a0"],
	punctuation: [",", ".", "!", "?", "-", "'", "\"", "\u2019"],
	parentheses: ["(", ")", "[", "]", "{", "}"],
	letter: ["f", "g", "h", "n"],
	char_ascii: int_range(0, 127),
	char_unicode: int_range(128, 32000),
	bcv: "$book$chapter$cv_sep$verse",
	b_range: "$book$range_sep$book",
	translation_sequence: "$translation$sequence_sep$translation",
	bc: "$book$chapter",
	bc_range: "$book$chapter$range_sep$book",
	cb: "$c_explicit$chapter$in_book_of$book",
	c_psalm: "$chapter$ordinal$book",
	cv_psalm: "$chapter$ordinal$book$v_explicit$verse"
};
const options = get_options();
const possible_keys = Object.keys(possibles);
const option_keys = Object.keys(options);
let total_length = 0;
const start_time = new Date();
for (let i = 1; i <= 5000; i++) {
	const my_options = create_options(option_keys);
	bcv.set_options(my_options);
	const text = build_text(possible_keys);
	total_length += text.length;
	if (i % 1000 === 0) {
		const elapsed_time = Math.round((new Date() - start_time) / 1000);
		// Fix: guard against a zero elapsed_time (the original divided by it
		// and could log Infinity for fast batches).
		const bytes_per_second = elapsed_time > 0 ? Math.round(total_length / elapsed_time) : total_length;
		console.log(i, elapsed_time, "sec", Math.round(total_length / 1000), "kb", bytes_per_second, "bps");
	}
	try {
		// Every parsed reference must span at least one character.
		const results = bcv.parse(text).osis_and_indices();
		for (const result of results) {
			if (result.indices[0] >= result.indices[1]) {
				throw result;
			}
		}
		// Leaked internal "current_*" state in the output indicates a parser bug.
		const entities = JSON.stringify(bcv.parsed_entities());
		if (entities.match(/"current/)) {
			throw entities;
		}
	} catch (error) {
		console.log(error);
		console.log(my_options);
		console.log(text);
		// Fix: exit non-zero so callers/CI can detect fuzz failures (the
		// original `process.exit()` reported success).
		process.exit(1);
	}
}
================================================
FILE: bin/letters/blocks.txt
================================================
# http://xregexp.com/addons/unicode/unicode-blocks.js
Basic_Latin \u0000-\u007F
Latin_1_Supplement \u0080-\u00FF
Latin_Extended_A \u0100-\u017F
Latin_Extended_B \u0180-\u024F
IPA_Extensions \u0250-\u02AF
Spacing_Modifier_Letters \u02B0-\u02FF
Combining_Diacritical_Marks \u0300-\u036F
Greek_and_Coptic \u0370-\u03FF
Cyrillic \u0400-\u04FF
Cyrillic_Supplement \u0500-\u052F
Armenian \u0530-\u058F
Hebrew \u0590-\u05FF
Arabic \u0600-\u06FF
Syriac \u0700-\u074F
Arabic_Supplement \u0750-\u077F
Thaana \u0780-\u07BF
NKo \u07C0-\u07FF
Samaritan \u0800-\u083F
Mandaic \u0840-\u085F
Arabic_Extended_A \u08A0-\u08FF
Devanagari \u0900-\u097F
Bengali \u0980-\u09FF
Gurmukhi \u0A00-\u0A7F
Gujarati \u0A80-\u0AFF
Oriya \u0B00-\u0B7F
Tamil \u0B80-\u0BFF
Telugu \u0C00-\u0C7F
Kannada \u0C80-\u0CFF
Malayalam \u0D00-\u0D7F
Sinhala \u0D80-\u0DFF
Thai \u0E00-\u0E7F
Lao \u0E80-\u0EFF
Tibetan \u0F00-\u0FFF
Myanmar \u1000-\u109F
Georgian \u10A0-\u10FF
Hangul_Jamo \u1100-\u11FF
Ethiopic \u1200-\u137F
Ethiopic_Supplement \u1380-\u139F
Cherokee \u13A0-\u13FF
Unified_Canadian_Aboriginal_Syllabics \u1400-\u167F
Ogham \u1680-\u169F
Runic \u16A0-\u16FF
Tagalog \u1700-\u171F
Hanunoo \u1720-\u173F
Buhid \u1740-\u175F
Tagbanwa \u1760-\u177F
Khmer \u1780-\u17FF
Mongolian \u1800-\u18AF
Unified_Canadian_Aboriginal_Syllabics_Extended \u18B0-\u18FF
Limbu \u1900-\u194F
Tai_Le \u1950-\u197F
New_Tai_Lue \u1980-\u19DF
Khmer_Symbols \u19E0-\u19FF
Buginese \u1A00-\u1A1F
Tai_Tham \u1A20-\u1AAF
Balinese \u1B00-\u1B7F
Sundanese \u1B80-\u1BBF
Batak \u1BC0-\u1BFF
Lepcha \u1C00-\u1C4F
Ol_Chiki \u1C50-\u1C7F
Sundanese_Supplement \u1CC0-\u1CCF
Vedic_Extensions \u1CD0-\u1CFF
Phonetic_Extensions \u1D00-\u1D7F
Phonetic_Extensions_Supplement \u1D80-\u1DBF
Combining_Diacritical_Marks_Supplement \u1DC0-\u1DFF
Latin_Extended_Additional \u1E00-\u1EFF
Greek_Extended \u1F00-\u1FFF
General_Punctuation \u2000-\u206F
Superscripts_and_Subscripts \u2070-\u209F
Currency_Symbols \u20A0-\u20CF
Combining_Diacritical_Marks_for_Symbols \u20D0-\u20FF
Letterlike_Symbols \u2100-\u214F
Number_Forms \u2150-\u218F
Arrows \u2190-\u21FF
Mathematical_Operators \u2200-\u22FF
Miscellaneous_Technical \u2300-\u23FF
Control_Pictures \u2400-\u243F
Optical_Character_Recognition \u2440-\u245F
Enclosed_Alphanumerics \u2460-\u24FF
Box_Drawing \u2500-\u257F
Block_Elements \u2580-\u259F
Geometric_Shapes \u25A0-\u25FF
Miscellaneous_Symbols \u2600-\u26FF
Dingbats \u2700-\u27BF
Miscellaneous_Mathematical_Symbols_A \u27C0-\u27EF
Supplemental_Arrows_A \u27F0-\u27FF
Braille_Patterns \u2800-\u28FF
Supplemental_Arrows_B \u2900-\u297F
Miscellaneous_Mathematical_Symbols_B \u2980-\u29FF
Supplemental_Mathematical_Operators \u2A00-\u2AFF
Miscellaneous_Symbols_and_Arrows \u2B00-\u2BFF
Glagolitic \u2C00-\u2C5F
Latin_Extended_C \u2C60-\u2C7F
Coptic \u2C80-\u2CFF
Georgian_Supplement \u2D00-\u2D2F
Tifinagh \u2D30-\u2D7F
Ethiopic_Extended \u2D80-\u2DDF
Cyrillic_Extended_A \u2DE0-\u2DFF
Supplemental_Punctuation \u2E00-\u2E7F
CJK_Radicals_Supplement \u2E80-\u2EFF
Kangxi_Radicals \u2F00-\u2FDF
Ideographic_Description_Characters \u2FF0-\u2FFF
CJK_Symbols_and_Punctuation \u3000-\u303F
Hiragana \u3040-\u309F
Katakana \u30A0-\u30FF
Bopomofo \u3100-\u312F
Hangul_Compatibility_Jamo \u3130-\u318F
Kanbun \u3190-\u319F
Bopomofo_Extended \u31A0-\u31BF
CJK_Strokes \u31C0-\u31EF
Katakana_Phonetic_Extensions \u31F0-\u31FF
Enclosed_CJK_Letters_and_Months \u3200-\u32FF
CJK_Compatibility \u3300-\u33FF
CJK_Unified_Ideographs_Extension_A \u3400-\u4DBF
Yijing_Hexagram_Symbols \u4DC0-\u4DFF
CJK_Unified_Ideographs \u4E00-\u9FFF
Yi_Syllables \uA000-\uA48F
Yi_Radicals \uA490-\uA4CF
Lisu \uA4D0-\uA4FF
Vai \uA500-\uA63F
Cyrillic_Extended_B \uA640-\uA69F
Bamum \uA6A0-\uA6FF
Modifier_Tone_Letters \uA700-\uA71F
Latin_Extended_D \uA720-\uA7FF
Syloti_Nagri \uA800-\uA82F
Common_Indic_Number_Forms \uA830-\uA83F
Phags_pa \uA840-\uA87F
Saurashtra \uA880-\uA8DF
Devanagari_Extended \uA8E0-\uA8FF
Kayah_Li \uA900-\uA92F
Rejang \uA930-\uA95F
Hangul_Jamo_Extended_A \uA960-\uA97F
Javanese \uA980-\uA9DF
Cham \uAA00-\uAA5F
Myanmar_Extended_A \uAA60-\uAA7F
Tai_Viet \uAA80-\uAADF
Meetei_Mayek_Extensions \uAAE0-\uAAFF
Ethiopic_Extended_A \uAB00-\uAB2F
Meetei_Mayek \uABC0-\uABFF
Hangul_Syllables \uAC00-\uD7AF
Hangul_Jamo_Extended_B \uD7B0-\uD7FF
High_Surrogates \uD800-\uDB7F
High_Private_Use_Surrogates \uDB80-\uDBFF
Low_Surrogates \uDC00-\uDFFF
Private_Use_Area \uE000-\uF8FF
CJK_Compatibility_Ideographs \uF900-\uFAFF
Alphabetic_Presentation_Forms \uFB00-\uFB4F
Arabic_Presentation_Forms_A \uFB50-\uFDFF
Variation_Selectors \uFE00-\uFE0F
Vertical_Forms \uFE10-\uFE1F
Combining_Half_Marks \uFE20-\uFE2F
CJK_Compatibility_Forms \uFE30-\uFE4F
Small_Form_Variants \uFE50-\uFE6F
Arabic_Presentation_Forms_B \uFE70-\uFEFF
Halfwidth_and_Fullwidth_Forms \uFF00-\uFFEF
Specials \uFFF0-\uFFFF
================================================
FILE: bin/letters/letters.txt
================================================
# http://xregexp.com/addons/unicode/unicode-base.js plus combining characters
#\u0030-\u0039 #numbers
\u0041-\u005A
\u0061-\u007A
\u00AA
\u00B5
\u00BA
\u00C0-\u00D6
\u00D8-\u00F6
\u00F8-\u02C1
\u02C6-\u02D1
\u02E0-\u02E4
\u02EC
\u02EE
\u0370-\u0374
\u0376
\u0377
\u037A-\u037D
\u0386
\u0388-\u038A
\u038C
\u038E-\u03A1
\u03A3-\u03F5
\u03F7-\u0481
\u048A-\u0527
\u0531-\u0556
\u0559
\u0561-\u0587
\u05D0-\u05EA
\u05F0-\u05F2
\u0620-\u064A
\u066E
\u066F
\u0671-\u06D3
\u06D5
\u06E5
\u06E6
\u06EE
\u06EF
\u06FA-\u06FC
\u06FF
\u0710
\u0712-\u072F
\u074D-\u07A5
\u07B1
\u07CA-\u07EA
\u07F4
\u07F5
\u07FA
\u0800-\u0815
\u081A
\u0824
\u0828
\u0840-\u0858
\u08A0
\u08A2-\u08AC
\u0904-\u0939
\u093D
\u0950
\u0958-\u0961
\u0971-\u0977
\u0979-\u097F
\u0985-\u098C
\u098F
\u0990
\u0993-\u09A8
\u09AA-\u09B0
\u09B2
\u09B6-\u09B9
\u09BD
\u09CE
\u09DC
\u09DD
\u09DF-\u09E1
\u09F0
\u09F1
\u0A05-\u0A0A
\u0A0F
\u0A10
\u0A13-\u0A28
\u0A2A-\u0A30
\u0A32
\u0A33
\u0A35
\u0A36
\u0A38
\u0A39
\u0A59-\u0A5C
\u0A5E
\u0A72-\u0A74
\u0A85-\u0A8D
\u0A8F-\u0A91
\u0A93-\u0AA8
\u0AAA-\u0AB0
\u0AB2
\u0AB3
\u0AB5-\u0AB9
\u0ABD
\u0AD0
\u0AE0
\u0AE1
\u0B05-\u0B0C
\u0B0F
\u0B10
\u0B13-\u0B28
\u0B2A-\u0B30
\u0B32
\u0B33
\u0B35-\u0B39
\u0B3D
\u0B5C
\u0B5D
\u0B5F-\u0B61
\u0B71
\u0B83
\u0B85-\u0B8A
\u0B8E-\u0B90
\u0B92-\u0B95
\u0B99
\u0B9A
\u0B9C
\u0B9E
\u0B9F
\u0BA3
\u0BA4
\u0BA8-\u0BAA
\u0BAE-\u0BB9
\u0BD0
\u0C05-\u0C0C
\u0C0E-\u0C10
\u0C12-\u0C28
\u0C2A-\u0C33
\u0C35-\u0C39
\u0C3D
\u0C58
\u0C59
\u0C60
\u0C61
\u0C85-\u0C8C
\u0C8E-\u0C90
\u0C92-\u0CA8
\u0CAA-\u0CB3
\u0CB5-\u0CB9
\u0CBD
\u0CDE
\u0CE0
\u0CE1
\u0CF1
\u0CF2
\u0D05-\u0D0C
\u0D0E-\u0D10
\u0D12-\u0D3A
\u0D3D
\u0D4E
\u0D60
\u0D61
\u0D7A-\u0D7F
\u0D85-\u0D96
\u0D9A-\u0DB1
\u0DB3-\u0DBB
\u0DBD
\u0DC0-\u0DC6
\u0E01-\u0E30
\u0E32
\u0E33
\u0E40-\u0E46
\u0E81
\u0E82
\u0E84
\u0E87
\u0E88
\u0E8A
\u0E8D
\u0E94-\u0E97
\u0E99-\u0E9F
\u0EA1-\u0EA3
\u0EA5
\u0EA7
\u0EAA
\u0EAB
\u0EAD-\u0EB0
\u0EB2
\u0EB3
\u0EBD
\u0EC0-\u0EC4
\u0EC6
\u0EDC-\u0EDF
\u0F00
\u0F40-\u0F47
\u0F49-\u0F6C
\u0F88-\u0F8C
\u1000-\u102A
\u103F
\u1050-\u1055
\u105A-\u105D
\u1061
\u1065
\u1066
\u106E-\u1070
\u1075-\u1081
\u108E
\u10A0-\u10C5
\u10C7
\u10CD
\u10D0-\u10FA
\u10FC-\u1248
\u124A-\u124D
\u1250-\u1256
\u1258
\u125A-\u125D
\u1260-\u1288
\u128A-\u128D
\u1290-\u12B0
\u12B2-\u12B5
\u12B8-\u12BE
\u12C0
\u12C2-\u12C5
\u12C8-\u12D6
\u12D8-\u1310
\u1312-\u1315
\u1318-\u135A
\u1380-\u138F
\u13A0-\u13F4
\u1401-\u166C
\u166F-\u167F
\u1681-\u169A
\u16A0-\u16EA
\u1700-\u170C
\u170E-\u1711
\u1720-\u1731
\u1740-\u1751
\u1760-\u176C
\u176E-\u1770
\u1780-\u17B3
\u17D7
\u17DC
\u1820-\u1877
\u1880-\u18A8
\u18AA
\u18B0-\u18F5
\u1900-\u191C
\u1950-\u196D
\u1970-\u1974
\u1980-\u19AB
\u19C1-\u19C7
\u1A00-\u1A16
\u1A20-\u1A54
\u1AA7
\u1B05-\u1B33
\u1B45-\u1B4B
\u1B83-\u1BA0
\u1BAE
\u1BAF
\u1BBA-\u1BE5
\u1C00-\u1C23
\u1C4D-\u1C4F
\u1C5A-\u1C7D
\u1CE9-\u1CEC
\u1CEE-\u1CF1
\u1CF5
\u1CF6
\u1D00-\u1DBF
\u1E00-\u1F15
\u1F18-\u1F1D
\u1F20-\u1F45
\u1F48-\u1F4D
\u1F50-\u1F57
\u1F59
\u1F5B
\u1F5D
\u1F5F-\u1F7D
\u1F80-\u1FB4
\u1FB6-\u1FBC
\u1FBE
\u1FC2-\u1FC4
\u1FC6-\u1FCC
\u1FD0-\u1FD3
\u1FD6-\u1FDB
\u1FE0-\u1FEC
\u1FF2-\u1FF4
\u1FF6-\u1FFC
\u2071
\u207F
\u2090-\u209C
\u20D0-\u20FF #combining
\u2102
\u2107
\u210A-\u2113
\u2115
\u2119-\u211D
\u2124
\u2126
\u2128
\u212A-\u212D
\u212F-\u2139
\u213C-\u213F
\u2145-\u2149
\u214E
\u2183
\u2184
\u2C00-\u2C2E
\u2C30-\u2C5E
\u2C60-\u2CE4
\u2CEB-\u2CEE
\u2CF2
\u2CF3
\u2D00-\u2D25
\u2D27
\u2D2D
\u2D30-\u2D67
\u2D6F
\u2D80-\u2D96
\u2DA0-\u2DA6
\u2DA8-\u2DAE
\u2DB0-\u2DB6
\u2DB8-\u2DBE
\u2DC0-\u2DC6
\u2DC8-\u2DCE
\u2DD0-\u2DD6
\u2DD8-\u2DDE
\u2E2F
\u3005
\u3006
\u3031-\u3035
\u303B
\u303C
\u3041-\u3096
\u309D-\u309F
\u30A1-\u30FA
\u30FC-\u30FF
\u3105-\u312D
\u3131-\u318E
\u31A0-\u31BA
\u31F0-\u31FF
\u3400-\u4DB5
\u4E00-\u9FCC
\uA000-\uA48C
\uA4D0-\uA4FD
\uA500-\uA60C
\uA610-\uA61F
\uA62A
\uA62B
\uA640-\uA66E
\uA67F-\uA697
\uA6A0-\uA6E5
\uA717-\uA71F
\uA722-\uA788
\uA78B-\uA78E
\uA790-\uA793
\uA7A0-\uA7AA
\uA7F8-\uA801
\uA803-\uA805
\uA807-\uA80A
\uA80C-\uA822
\uA840-\uA873
\uA882-\uA8B3
\uA8F2-\uA8F7
\uA8FB
\uA90A-\uA925
\uA930-\uA946
\uA960-\uA97C
\uA984-\uA9B2
\uA9CF
\uAA00-\uAA28
\uAA40-\uAA42
\uAA44-\uAA4B
\uAA60-\uAA76
\uAA7A
\uAA80-\uAAAF
\uAAB1
\uAAB5
\uAAB6
\uAAB9-\uAABD
\uAAC0
\uAAC2
\uAADB-\uAADD
\uAAE0-\uAAEA
\uAAF2-\uAAF4
\uAB01-\uAB06
\uAB09-\uAB0E
\uAB11-\uAB16
\uAB20-\uAB26
\uAB28-\uAB2E
\uABC0-\uABE2
\uAC00-\uD7A3
\uD7B0-\uD7C6
\uD7CB-\uD7FB
\uF900-\uFA6D
\uFA70-\uFAD9
\uFB00-\uFB06
\uFB13-\uFB17
\uFB1D
\uFB1F-\uFB28
\uFB2A-\uFB36
\uFB38-\uFB3C
\uFB3E
\uFB40
\uFB41
\uFB43
\uFB44
\uFB46-\uFBB1
\uFBD3-\uFD3D
\uFD50-\uFD8F
\uFD92-\uFDC7
\uFDF0-\uFDFB
\uFE70-\uFE74
\uFE76-\uFEFC
\uFF21-\uFF3A
\uFF41-\uFF5A
\uFF66-\uFFBE
\uFFC2-\uFFC7
\uFFCA-\uFFCF
\uFFD2-\uFFD7
\uFFDA-\uFFDC
#Mn Nonspacing marks act here as non-word-boundaries
\u0300-\u036F
\u0483-\u0487
\u0591-\u05BD
\u05BF
\u05C1
\u05C2
\u05C4
\u05C5
\u05C7
\u0610-\u061A
\u064B-\u065F
\u0670
\u06D6-\u06DC
\u06DF-\u06E4
\u06E7
\u06E8
\u06EA-\u06ED
\u0711
\u0730-\u074A
\u07A6-\u07B0
\u07EB-\u07F3
\u0816-\u0819
\u081B-\u0823
\u0825-\u0827
\u0829-\u082D
\u0859-\u085B
\u08E4-\u08FE
\u0900-\u0902
\u093A
\u093C
\u0941-\u0948
\u094D
\u0951-\u0957
\u0962
\u0963
\u0981
\u09BC
\u09C1-\u09C4
\u09CD
\u09E2
\u09E3
\u0A01
\u0A02
\u0A3C
\u0A41
\u0A42
\u0A47
\u0A48
\u0A4B-\u0A4D
\u0A51
\u0A70
\u0A71
\u0A75
\u0A81
\u0A82
\u0ABC
\u0AC1-\u0AC5
\u0AC7
\u0AC8
\u0ACD
\u0AE2
\u0AE3
\u0B01
\u0B3C
\u0B3F
\u0B41-\u0B44
\u0B4D
\u0B56
\u0B62
\u0B63
\u0B82
\u0BC0
\u0BCD
\u0C3E-\u0C40
\u0C46-\u0C48
\u0C4A-\u0C4D
\u0C55
\u0C56
\u0C62
\u0C63
\u0CBC
\u0CBF
\u0CC6
\u0CCC
\u0CCD
\u0CE2
\u0CE3
\u0D41-\u0D44
\u0D4D
\u0D62
\u0D63
\u0DCA
\u0DD2-\u0DD4
\u0DD6
\u0E31
\u0E34-\u0E3A
\u0E47-\u0E4E
\u0EB1
\u0EB4-\u0EB9
\u0EBB
\u0EBC
\u0EC8-\u0ECD
\u0F18
\u0F19
\u0F35
\u0F37
\u0F39
\u0F71-\u0F7E
\u0F80-\u0F84
\u0F86
\u0F87
\u0F8D-\u0F97
\u0F99-\u0FBC
\u0FC6
\u102D-\u1030
\u1032-\u1037
\u1039
\u103A
\u103D
\u103E
\u1058
\u1059
\u105E-\u1060
\u1071-\u1074
\u1082
\u1085
\u1086
\u108D
\u109D
\u135D-\u135F
\u1712-\u1714
\u1732-\u1734
\u1752
\u1753
\u1772
\u1773
\u17B4
\u17B5
\u17B7-\u17BD
\u17C6
\u17C9-\u17D3
\u17DD
\u180B-\u180D
\u18A9
\u1920-\u1922
\u1927
\u1928
\u1932
\u1939-\u193B
\u1A17
\u1A18
\u1A56
\u1A58-\u1A5E
\u1A60
\u1A62
\u1A65-\u1A6C
\u1A73-\u1A7C
\u1A7F
\u1B00-\u1B03
\u1B34
\u1B36-\u1B3A
\u1B3C
\u1B42
\u1B6B-\u1B73
\u1B80
\u1B81
\u1BA2-\u1BA5
\u1BA8
\u1BA9
\u1BAB
\u1BE6
\u1BE8
\u1BE9
\u1BED
\u1BEF-\u1BF1
\u1C2C-\u1C33
\u1C36
\u1C37
\u1CD0-\u1CD2
\u1CD4-\u1CE0
\u1CE2-\u1CE8
\u1CED
\u1CF4
\u1DC0-\u1DE6
\u1DFC-\u1DFF
\u1DC0-\u1DFF #not just Mn
\u20D0-\u20DC
\u20E1
\u20E5-\u20F0
\u2CEF-\u2CF1
\u2D7F
\u2DE0-\u2DFF
\u302A-\u302D
\u3099
\u309A
\uA66F
\uA674-\uA67D
\uA69F
\uA6F0
\uA6F1
\uA802
\uA806
\uA80B
\uA825
\uA826
\uA8C4
\uA8E0-\uA8F1
\uA926-\uA92D
\uA947-\uA951
\uA980-\uA982
\uA9B3
\uA9B6-\uA9B9
\uA9BC
\uAA29-\uAA2E
\uAA31
\uAA32
\uAA35
\uAA36
\uAA43
\uAA4C
\uAAB0
\uAAB2-\uAAB4
\uAAB7
\uAAB8
\uAABE
\uAABF
\uAAC1
\uAAEC
\uAAED
\uAAF6
\uABE5
\uABE8
\uABED
\uAC00-\uD7AF #Hangul syllables
\uFB1E
\uFE00-\uFE0F
\uFE20-\uFE26
\uFE20-\uFE2F #not just Mn
================================================
FILE: bin/make_regexps.js
================================================
import { createRequire } from 'module';
const require = createRequire(import.meta.url);
const { RegExpBuilder } = require("@pemistahl/grex");
const fs = require("fs");
//console.log(process.argv);
// Read a JSON array of strings (base64 on argv, or plain text in ./temp.txt
// when shell-redirected), build one regexp matching all of them with grex,
// verify it, and print {"patterns": [...]} on stdout.
let arg;
if (process.argv[2] === "<") {
	// Not base64-encoded when written to a file.
	arg = fs.readFileSync("./temp.txt").toString("utf8");
} else {
	arg = Buffer.from(process.argv[2], "base64").toString("utf8");
}
const strings = JSON.parse(arg);
const pattern = RegExpBuilder.from(strings).withMinimumSubstringLength(3).build();
const re = new RegExp(pattern);
// Sanity check: the generated pattern must match every string it was built
// from. (Removed unused counters from the original: loop_count, ok_count,
// max_length, redos.)
for (const string of strings) {
	if (!re.test(string)) {
		// Fix: throw a real Error instead of a bare string.
		throw new Error("No pattern match for " + string);
	}
}
console.log(JSON.stringify({ "patterns": [pattern] }).replace(/\\\\u/g, "\\u"));
================================================
FILE: cjs/ar_bcv_parser.js
================================================
// Build banner: provide a `module` object so the CommonJS epilogue below also
// works when this file is loaded directly in a browser.
if (typeof module === "undefined") { var module = {}; }
"use strict";
// esbuild-generated CommonJS interop helpers.
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Define lazy, enumerable getters on `target` for every entry in `all`.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copy own properties from `from` onto `to` (skipping `except`), preserving
// enumerability through getters.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Mark the export object as an ES module for interop consumers.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
// build/cjs_bundle.ts
var cjs_bundle_exports = {};
__export(cjs_bundle_exports, {
  bcv_parser: () => bcv_parser
});
module.exports = __toCommonJS(cjs_bundle_exports);
// build/bcv_grammar.js
// Peggy-generated parser error class: carries the expectations, the found
// text, and the source location of a parse failure, and can render a
// human-readable report. Do not hand-edit: regenerated by the build.
var peg$SyntaxError = class extends SyntaxError {
  constructor(message, expected, found, location) {
    super(message);
    this.expected = expected;
    this.found = found;
    this.location = location;
    this.name = "SyntaxError";
  }
  // Render an "--> source:line:col" report with a caret line underneath the
  // offending text when the grammar source text is available in `sources`.
  format(sources) {
    let str = "Error: " + this.message;
    if (this.location) {
      let src = null;
      const st = sources.find((s2) => s2.source === this.location.source);
      if (st) {
        src = st.text.split(/\r\n|\n|\r/g);
      }
      const s = this.location.start;
      const offset_s = this.location.source && typeof this.location.source.offset === "function" ? this.location.source.offset(s) : s;
      const loc = this.location.source + ":" + offset_s.line + ":" + offset_s.column;
      if (src) {
        const e = this.location.end;
        const filler = "".padEnd(offset_s.line.toString().length, " ");
        const line = src[s.line - 1];
        const last = s.line === e.line ? e.column : line.length + 1;
        const hatLen = last - s.column || 1;
        str += "\n --> " + loc + "\n" + filler + " |\n" + offset_s.line + " | " + line + "\n" + filler + " | " + "".padEnd(s.column - 1, " ") + "".padEnd(hatLen, "^");
      } else {
        str += "\n at " + loc;
      }
    }
    return str;
  }
  // Build the standard "Expected X but Y found." message from the raw
  // expectation objects collected during parsing.
  static buildMessage(expected, found) {
    function hex(ch) {
      return ch.codePointAt(0).toString(16).toUpperCase();
    }
    // Escape non-printable/combining characters only when the runtime
    // supports Unicode property escapes.
    const nonPrintable = Object.prototype.hasOwnProperty.call(RegExp.prototype, "unicode") ? new RegExp("[\\p{C}\\p{Mn}\\p{Mc}]", "gu") : null;
    function unicodeEscape(s) {
      if (nonPrintable) {
        return s.replace(nonPrintable, (ch) => "\\u{" + hex(ch) + "}");
      }
      return s;
    }
    function literalEscape(s) {
      return unicodeEscape(s.replace(/\\/g, "\\\\").replace(/"/g, '\\"').replace(/\0/g, "\\0").replace(/\t/g, "\\t").replace(/\n/g, "\\n").replace(/\r/g, "\\r").replace(/[\x00-\x0F]/g, (ch) => "\\x0" + hex(ch)).replace(/[\x10-\x1F\x7F-\x9F]/g, (ch) => "\\x" + hex(ch)));
    }
    function classEscape(s) {
      return unicodeEscape(s.replace(/\\/g, "\\\\").replace(/\]/g, "\\]").replace(/\^/g, "\\^").replace(/-/g, "\\-").replace(/\0/g, "\\0").replace(/\t/g, "\\t").replace(/\n/g, "\\n").replace(/\r/g, "\\r").replace(/[\x00-\x0F]/g, (ch) => "\\x0" + hex(ch)).replace(/[\x10-\x1F\x7F-\x9F]/g, (ch) => "\\x" + hex(ch)));
    }
    // One renderer per expectation type ("literal", "class", "any", ...).
    const DESCRIBE_EXPECTATION_FNS = {
      literal(expectation) {
        return '"' + literalEscape(expectation.text) + '"';
      },
      class(expectation) {
        const escapedParts = expectation.parts.map(
          (part) => Array.isArray(part) ? classEscape(part[0]) + "-" + classEscape(part[1]) : classEscape(part)
        );
        return "[" + (expectation.inverted ? "^" : "") + escapedParts.join("") + "]" + (expectation.unicode ? "u" : "");
      },
      any() {
        return "any character";
      },
      end() {
        return "end of input";
      },
      other(expectation) {
        return expectation.description;
      }
    };
    function describeExpectation(expectation) {
      return DESCRIBE_EXPECTATION_FNS[expectation.type](expectation);
    }
    // Deduplicate (after sorting) and join the descriptions in English
    // list style: "a", "a or b", "a, b, or c".
    function describeExpected(expected2) {
      const descriptions = expected2.map(describeExpectation);
      descriptions.sort();
      if (descriptions.length > 0) {
        let j = 1;
        for (let i = 1; i < descriptions.length; i++) {
          if (descriptions[i - 1] !== descriptions[i]) {
            descriptions[j] = descriptions[i];
            j++;
          }
        }
        descriptions.length = j;
      }
      switch (descriptions.length) {
        case 1:
          return descriptions[0];
        case 2:
          return descriptions[0] + " or " + descriptions[1];
        default:
          return descriptions.slice(0, -1).join(", ") + ", or " + descriptions[descriptions.length - 1];
      }
    }
    function describeFound(found2) {
      return found2 ? '"' + literalEscape(found2) + '"' : "end of input";
    }
    return "Expected " + describeExpected(expected) + " but " + describeFound(found) + " found.";
  }
};
function peg$parse(input, options) {
options = options !== void 0 ? options : {};
const peg$FAILED = {};
const peg$source = options.grammarSource;
const peg$startRuleFunctions = {
start: peg$parsestart
};
let peg$startRuleFunction = peg$parsestart;
const peg$c0 = "(";
const peg$c1 = ")";
const peg$c2 = "";
const peg$c3 = "/";
const peg$c4 = ",";
const peg$c5 = ".";
const peg$c6 = "-";
const peg$c7 = "$ordinal";
const peg$c8 = "/1";
const peg$c9 = "$ff_value";
const peg$c10 = "$next_value";
const peg$c11 = "/9";
const peg$c12 = "/2";
const peg$c13 = ".1";
const peg$c14 = "$ab";
const peg$c15 = "$c_explicit_value";
const peg$c16 = "$c_sep_value";
const peg$c17 = "$v_explicit_value";
const peg$c18 = "$cv_sep";
const peg$c19 = "$cv_sep_weak";
const peg$c20 = "$sequence_sep_value";
const peg$c21 = "$range_sep";
const peg$c22 = "$title_value";
const peg$c23 = "$in_book_of";
const peg$c24 = "";
const peg$c25 = "$integer_value";
const peg$c26 = "$space";
const peg$r0 = /^[1-8]/;
const peg$r1 = /^[0-9]/;
const peg$r2 = /^[([]/;
const peg$r3 = /^[)\]]/;
const peg$r4 = /^[^\x1F\x1E([]/;
const peg$e0 = peg$literalExpectation("(", false);
const peg$e1 = peg$literalExpectation(")", false);
const peg$e2 = peg$literalExpectation("", false);
const peg$e3 = peg$literalExpectation("/", false);
const peg$e4 = peg$classExpectation([["1", "8"]], false, false, false);
const peg$e5 = peg$literalExpectation(",", false);
const peg$e6 = peg$literalExpectation(".", false);
const peg$e7 = peg$literalExpectation("-", false);
const peg$e8 = peg$literalExpectation("$ordinal", false);
const peg$e9 = peg$literalExpectation("/1", false);
const peg$e10 = peg$literalExpectation("$ff_value", false);
const peg$e11 = peg$literalExpectation("$next_value", false);
const peg$e12 = peg$literalExpectation("/9", false);
const peg$e13 = peg$literalExpectation("/2", false);
const peg$e14 = peg$literalExpectation(".1", false);
const peg$e15 = peg$classExpectation([["0", "9"]], false, false, false);
const peg$e16 = peg$literalExpectation("$ab", false);
const peg$e17 = peg$literalExpectation("$c_explicit_value", false);
const peg$e18 = peg$literalExpectation("$c_sep_value", false);
const peg$e19 = peg$literalExpectation("$v_explicit_value", false);
const peg$e20 = peg$literalExpectation("$cv_sep", false);
const peg$e21 = peg$literalExpectation("$cv_sep_weak", false);
const peg$e22 = peg$literalExpectation("$sequence_sep_value", false);
const peg$e23 = peg$literalExpectation("$range_sep", false);
const peg$e24 = peg$literalExpectation("$title_value", false);
const peg$e25 = peg$literalExpectation("$in_book_of", false);
const peg$e26 = peg$classExpectation(["(", "["], false, false, false);
const peg$e27 = peg$classExpectation([")", "]"], false, false, false);
const peg$e28 = peg$literalExpectation("", false);
const peg$e29 = peg$literalExpectation("$integer_value", false);
const peg$e30 = peg$classExpectation(["", "", "(", "["], true, false, false);
const peg$e31 = peg$literalExpectation("$space", false);
// ---------------------------------------------------------------------------
// Semantic actions peg$f0..peg$f53 (generated from the grammar). Each builds
// an AST node; `indices` are inclusive character offsets, so the exclusive
// range() end is stored as `r.end - 1`.
// ---------------------------------------------------------------------------

// One `(sep? item)` step of a top-level sequence: when the separator was an
// explicit chapter marker ("c_explicit"), tag the item so its bare number is
// later interpreted as a chapter.
function peg$f0(val_1, sep_val, seq_post) {
  if (sep_val && sep_val.type && sep_val.type === "c_explicit") seq_post.explicit_context = "c";
  return [seq_post];
}
// Whole sequence: prepend the (wrapped) first item to the collected tail.
function peg$f1(val_1, val_2) {
  val_2.unshift([val_1]);
  const r = range();
  return { type: "sequence", value: val_2, indices: [r.start, r.end - 1] };
}
// Same as peg$f0, but for items inside a parenthesized sequence.
function peg$f2(sep_val_1, val_1, sep_val, seq_post) {
  if (sep_val && sep_val.type && sep_val.type === "c_explicit") seq_post.explicit_context = "c";
  return [seq_post];
}
// Parenthesized sequence: apply the leading separator's chapter context to the
// first item, then prepend it to the tail items.
function peg$f3(sep_val_1, val_1, val_2) {
  if (typeof val_2 === "undefined") val_2 = [];
  if (sep_val_1 && sep_val_1.type && sep_val_1.type === "c_explicit") val_1.explicit_context = "c";
  val_2.unshift([val_1]);
  const r = range();
  return { type: "sequence_post_enclosed", value: val_2, indices: [r.start, r.end - 1] };
}
// Range: when the left side came from the `b !(range_sep …)` alternative it is
// a two-element [book, lookahead] array — unwrap to the book node.
function peg$f4(val_1, val_2) {
  if (val_1.length && val_1.length === 2) val_1 = val_1[0];
  const r = range();
  return { type: "range", value: [val_1, val_2], indices: [r.start, r.end - 1] };
}
// Standalone book.
function peg$f5(val) {
  const r = range();
  return { type: "b", value: val.value, indices: [r.start, r.end - 1] };
}
// Book + chapter.
function peg$f6(val_1, val_2) {
  const r = range();
  return { type: "bc", value: [val_1, val_2], indices: [r.start, r.end - 1] };
}
// Book + chapter (comma form).
function peg$f7(val_1, val_2) {
  const r = range();
  return { type: "bc", value: [val_1, val_2], indices: [r.start, r.end - 1] };
}
// Predicate: accept only when the chapter integer carries no partial
// (decimal) part — cf. peg$f50, which sets `partial`.
function peg$f8(val_1) {
  return val_1.value[1].value[0].partial == null;
}
// Book+chapter followed by a title.
function peg$f9(val_1, val_2) {
  const r = range();
  return { type: "bc_title", value: [val_1, val_2], indices: [r.start, r.end - 1] };
}
// Book + chapter + verse.
function peg$f10(val_1, val_2) {
  const r = range();
  return { type: "bcv", value: [val_1, val_2], indices: [r.start, r.end - 1] };
}
// Predicate: chapter integer must not be partial (see peg$f50).
function peg$f11(val_1) {
  return val_1.value[1].value[0].partial == null;
}
function peg$f12(val_1, val_2) {
  const r = range();
  return { type: "bcv", value: [val_1, val_2], indices: [r.start, r.end - 1] };
}
// Predicate: chapter integer must not be partial (see peg$f50).
function peg$f13(val_1) {
  return val_1.value[1].value[0].partial == null;
}
function peg$f14(val_1, val_2) {
  const r = range();
  return { type: "bcv", value: [val_1, val_2], indices: [r.start, r.end - 1] };
}
// Predicate on the chapter node itself: no partial integer (see peg$f50).
function peg$f15(val_1, val_2) {
  return val_2.value[0].partial == null;
}
// Hyphen range "b c-v - v": assembles a bcv from book/chapter/verse pieces and
// ranges it to the closing verse; inner indices are stitched from the pieces.
function peg$f16(val_1, val_2, val_3, val_4) {
  const r = range();
  return { type: "range", value: [{ type: "bcv", value: [{ type: "bc", value: [val_1, val_2], indices: [val_1.indices[0], val_2.indices[1]] }, val_3], indices: [val_1.indices[0], val_3.indices[1]] }, val_4], indices: [r.start, r.end - 1] };
}
// Book + verse (no chapter).
function peg$f17(val_1, val_2) {
  const r = range();
  return { type: "bv", value: [val_1, val_2], indices: [r.start, r.end - 1] };
}
// Standalone chapter.
function peg$f18(val) {
  const r = range();
  return { type: "c", value: [val], indices: [r.start, r.end - 1] };
}
// Predicate: chapter integer must not be partial (see peg$f50).
function peg$f19(val_1) {
  return val_1.value[0].partial == null;
}
// Chapter-before-book ("chapter N of Book"): emitted as bc with the book
// first — note the swapped argument order.
function peg$f20(val_1, val_2) {
  const r = range();
  return { type: "bc", value: [val_2, val_1], indices: [r.start, r.end - 1] };
}
// Predicate: chapter integer must not be partial (see peg$f50).
function peg$f21(val_1) {
  return val_1.value[0].partial == null;
}
// Chapter-book range ("chapters N-M of Book"): book node goes first.
function peg$f22(val_1, val_2, val_3) {
  const r = range();
  return { type: "cb_range", value: [val_3, val_1, val_2], indices: [r.start, r.end - 1] };
}
function peg$f23(val_1, val_2) {
  const r = range();
  return { type: "bcv", value: [val_1, val_2], indices: [r.start, r.end - 1] };
}
// Ordinal chapter-before-book form; same swapped order as peg$f20.
function peg$f24(val_1, val_2) {
  const r = range();
  return { type: "bc", value: [val_2, val_1], indices: [r.start, r.end - 1] };
}
function peg$f25(val_1, val_2) {
  const r = range();
  return { type: "bcv", value: [val_1, val_2], indices: [r.start, r.end - 1] };
}
// Psalm referenced by chapter name (e.g. "23rd Psalm" style rules).
function peg$f26(val) {
  const r = range();
  return { type: "c_psalm", value: val.value, indices: [r.start, r.end - 1] };
}
function peg$f27(val_1, val_2) {
  const r = range();
  return { type: "cv_psalm", value: [val_1, val_2], indices: [r.start, r.end - 1] };
}
// Predicate: chapter integer must not be partial (see peg$f50).
function peg$f28(val_1) {
  return val_1.value[0].partial == null;
}
// Chapter followed by a title.
function peg$f29(val_1, val_2) {
  const r = range();
  return { type: "c_title", value: [val_1, val_2], indices: [r.start, r.end - 1] };
}
// Predicate: chapter integer must not be partial (see peg$f50).
function peg$f30(val_1) {
  return val_1.value[0].partial == null;
}
// Chapter + verse.
function peg$f31(val_1, val_2) {
  const r = range();
  return { type: "cv", value: [val_1, val_2], indices: [r.start, r.end - 1] };
}
// Predicate: chapter integer must not be partial (see peg$f50).
function peg$f32(val_1) {
  return val_1.value[0].partial == null;
}
// Weak chapter + verse (looser separator).
function peg$f33(val_1, val_2) {
  const r = range();
  return { type: "cv", value: [val_1, val_2], indices: [r.start, r.end - 1] };
}
// "and following" suffix (ff).
function peg$f34(val_1) {
  const r = range();
  return { type: "ff", value: [val_1], indices: [r.start, r.end - 1] };
}
// "next verse" suffix.
function peg$f35(val_1) {
  const r = range();
  return { type: "next_v", value: [val_1], indices: [r.start, r.end - 1] };
}
// Bare integer followed by a title.
function peg$f36(val_1, val_2) {
  const r = range();
  return { type: "integer_title", value: [val_1, val_2], indices: [r.start, r.end - 1] };
}
// Context token (book/translation context carried between passages).
function peg$f37(val) {
  const r = range();
  return { type: "context", value: val.value, indices: [r.start, r.end - 1] };
}
// Standalone book (alternate rule).
function peg$f38(val) {
  const r = range();
  return { type: "b", value: val.value, indices: [r.start, r.end - 1] };
}
// Psalm 151 as a book: synthesizes a bc node with a fixed chapter integer 151
// whose indices point at the trailing two characters of the match.
function peg$f39(val) {
  const r = range();
  return { type: "bc", value: [val, { type: "c", value: [{ type: "integer", value: 151, indices: [r.end - 2, r.end - 1] }], indices: [r.end - 2, r.end - 1] }], indices: [r.start, r.end - 1] };
}
// Psalm 151 with a verse: wraps the integer in a "v" node.
function peg$f40(val_1, val_2) {
  const r = range();
  return { type: "bcv", value: [val_1, { type: "v", value: [val_2], indices: [val_2.indices[0], val_2.indices[1]] }], indices: [r.start, r.end - 1] };
}
// Standalone verse.
function peg$f41(val) {
  const r = range();
  return { type: "v", value: [val], indices: [r.start, r.end - 1] };
}
// Explicit chapter marker (two grammar alternatives share the same shape).
function peg$f42() {
  return { type: "c_explicit" };
}
function peg$f43() {
  return { type: "c_explicit" };
}
// Explicit verse marker.
function peg$f44() {
  return { type: "v_explicit" };
}
// Separator consumed with no semantic value.
function peg$f45() {
  return "";
}
// Chapter/psalm title.
function peg$f46(val) {
  const r = range();
  return { type: "title", value: [val], indices: [r.start, r.end - 1] };
}
// Translation sequence (two alternatives share the same shape).
function peg$f47(val) {
  const r = range();
  return { type: "translation_sequence", value: val, indices: [r.start, r.end - 1] };
}
function peg$f48(val) {
  const r = range();
  return { type: "translation_sequence", value: val, indices: [r.start, r.end - 1] };
}
// Single translation identifier.
function peg$f49(val) {
  const r = range();
  return { type: "translation", value: val.value, indices: [r.start, r.end - 1] };
}
// Integer with an optional decimal tail: `partial` holds the digits after the
// point (as a string) or null when absent; predicates like peg$f8 reject
// partial integers in chapter/verse positions.
function peg$f50(val_1, val_2) {
  const r = range();
  return { type: "integer", value: parseInt(val_1.join(""), 10), partial: val_2 != null ? val_2[1].join("") : null, indices: [r.start, r.end - 1] };
}
// Plain integer (digit characters joined and parsed base-10).
function peg$f51(val) {
  const r = range();
  return { type: "integer", value: parseInt(val.join(""), 10), indices: [r.start, r.end - 1] };
}
// Run of non-reference characters, kept so offsets stay aligned.
function peg$f52(val) {
  const r = range();
  return { type: "word", value: val.join(""), indices: [r.start, r.end - 1] };
}
// Hard stop token.
function peg$f53(val) {
  const r = range();
  return { type: "stop", value: val, indices: [r.start, r.end - 1] };
}
// --- Mutable parser state --------------------------------------------------
// `| 0` coerces a missing/undefined option to 0.
let peg$currPos = options.peg$currPos | 0;
let peg$savedPos = peg$currPos;
// Lazily-filled cache of { line, column } keyed by offset; index 0 pre-seeded.
const peg$posDetailsCache = [{ line: 1, column: 1 }];
// Farthest failure offset and the expectations collected there (peg$fail).
let peg$maxFailPos = peg$currPos;
let peg$maxFailExpected = options.peg$maxFailExpected || [];
let peg$silentFails = options.peg$silentFails | 0;
let peg$result;
// Optional start-rule override; peg$startRuleFunctions/peg$startRuleFunction
// are declared earlier in peg$parse (outside this excerpt).
if (options.startRule) {
  if (!(options.startRule in peg$startRuleFunctions)) {
    throw new Error(`Can't start parsing from rule "` + options.startRule + '".');
  }
  peg$startRuleFunction = peg$startRuleFunctions[options.startRule];
}
// Text of the current match (peg$savedPos..peg$currPos, end-exclusive).
function text() {
  return input.substring(peg$savedPos, peg$currPos);
}
// Start offset of the current match.
function offset() {
  return peg$savedPos;
}
// Raw span of the current match; `end` is exclusive (actions store end - 1).
function range() {
  return {
    source: peg$source,
    start: peg$savedPos,
    end: peg$currPos
  };
}
// Line/column location of the current match.
function location() {
  return peg$computeLocation(peg$savedPos, peg$currPos);
}
// Throws a structured syntax error describing what was expected at the
// current match span; `location2` defaults to that span.
function expected(description, location2) {
  location2 = location2 !== void 0 ? location2 : peg$computeLocation(peg$savedPos, peg$currPos);
  throw peg$buildStructuredError(
    [peg$otherExpectation(description)],
    input.substring(peg$savedPos, peg$currPos),
    location2
  );
}
// Throws a simple syntax error at the current match span (or `location2`).
function error(message, location2) {
  location2 = location2 !== void 0 ? location2 : peg$computeLocation(peg$savedPos, peg$currPos);
  throw peg$buildSimpleError(message, location2);
}
// Returns the full Unicode code point at `pos` (default: the current parse
// position) as a string, or "" when `pos` is past the end of the input.
function peg$getUnicode(pos = peg$currPos) {
  const codePoint = input.codePointAt(pos);
  return codePoint === void 0 ? "" : String.fromCodePoint(codePoint);
}
// Builds the expectation descriptor for a literal-string match failure.
function peg$literalExpectation(text2, ignoreCase) {
  const expectation = { type: "literal" };
  expectation.text = text2;
  expectation.ignoreCase = ignoreCase;
  return expectation;
}
// Builds the expectation descriptor for a character-class match failure.
function peg$classExpectation(parts, inverted, ignoreCase, unicode) {
  return {
    type: "class",
    parts: parts,
    inverted: inverted,
    ignoreCase: ignoreCase,
    unicode: unicode
  };
}
// Expectation descriptor for the "any character" pattern.
function peg$anyExpectation() {
  const expectation = {};
  expectation.type = "any";
  return expectation;
}
// Expectation descriptor for end-of-input.
function peg$endExpectation() {
  const expectation = {};
  expectation.type = "end";
  return expectation;
}
// Expectation descriptor for a human-readable rule description.
function peg$otherExpectation(description) {
  const expectation = { type: "other" };
  expectation.description = description;
  return expectation;
}
// Computes { line, column } for byte offset `pos`, memoized in
// peg$posDetailsCache. On a cache miss it walks forward from the nearest
// earlier cached offset, counting newlines (charCode 10).
function peg$computePosDetails(pos) {
  let details = peg$posDetailsCache[pos];
  let p;
  if (details) {
    return details;
  } else {
    // Find the closest cached position at or before `pos` to resume from.
    if (pos >= peg$posDetailsCache.length) {
      p = peg$posDetailsCache.length - 1;
    } else {
      p = pos;
      // Walk backward to the nearest filled cache slot (index 0 is seeded).
      while (!peg$posDetailsCache[--p]) {
      }
    }
    details = peg$posDetailsCache[p];
    // Copy so the cached entry at `p` is not mutated by the walk below.
    details = {
      line: details.line,
      column: details.column
    };
    while (p < pos) {
      if (input.charCodeAt(p) === 10) {
        details.line++;
        details.column = 1;
      } else {
        details.column++;
      }
      p++;
    }
    peg$posDetailsCache[pos] = details;
    return details;
  }
}
// Builds a Peggy location object ({ source, start, end } with offset/line/
// column) for the span [startPos, endPos). When `offset2` is truthy and the
// source object provides an offset() mapper, both endpoints are remapped.
function peg$computeLocation(startPos, endPos, offset2) {
  const startPosDetails = peg$computePosDetails(startPos);
  const endPosDetails = peg$computePosDetails(endPos);
  const res = {
    source: peg$source,
    start: {
      offset: startPos,
      line: startPosDetails.line,
      column: startPosDetails.column
    },
    end: {
      offset: endPos,
      line: endPosDetails.line,
      column: endPosDetails.column
    }
  };
  if (offset2 && peg$source && typeof peg$source.offset === "function") {
    res.start = peg$source.offset(res.start);
    res.end = peg$source.offset(res.end);
  }
  return res;
}
// Records an expectation at the farthest failure position seen so far.
// Failures before the farthest position are ignored; a failure beyond it
// resets the collected expectations.
function peg$fail(expected2) {
  if (peg$currPos < peg$maxFailPos) {
    return;
  }
  if (peg$currPos > peg$maxFailPos) {
    peg$maxFailPos = peg$currPos;
    peg$maxFailExpected = [];
  }
  peg$maxFailExpected.push(expected2);
}
// Wraps a plain message in a peg$SyntaxError (no expected/found details).
function peg$buildSimpleError(message, location2) {
  return new peg$SyntaxError(message, null, null, location2);
}
// Builds a peg$SyntaxError whose message is derived from the expectation
// list and the text actually found.
function peg$buildStructuredError(expected2, found, location2) {
  return new peg$SyntaxError(
    peg$SyntaxError.buildMessage(expected2, found),
    expected2,
    found,
    location2
  );
}
// start = (bcv_hyphen_range / sequence / ... / context)+
// Tries each alternative in grammar order and collects every match until no
// alternative applies; fails only if not even one item can be matched.
// (Replaces the generator's duplicated if-cascade with an ordered loop —
// behavior and alternative order are identical.)
function peg$parsestart() {
  const alternatives = [
    peg$parsebcv_hyphen_range,
    peg$parsesequence,
    peg$parsecb_range,
    peg$parserange,
    peg$parsenext_v,
    peg$parseff,
    peg$parsebcv_comma,
    peg$parsebc_title,
    peg$parseps151_bcv,
    peg$parsebcv,
    peg$parsebcv_weak,
    peg$parseps151_bc,
    peg$parsebc,
    peg$parsecv_psalm,
    peg$parsebv,
    peg$parsec_psalm,
    peg$parseb,
    peg$parsecbv,
    peg$parsecbv_ordinal,
    peg$parsecb,
    peg$parsecb_ordinal,
    peg$parsetranslation_sequence_enclosed,
    peg$parsetranslation_sequence,
    peg$parsesequence_sep,
    peg$parsec_title,
    peg$parseinteger_title,
    peg$parsecv,
    peg$parsecv_weak,
    peg$parseinteger,
    peg$parsec,
    peg$parsev,
    peg$parseword,
    peg$parseword_parenthesis,
    peg$parsecontext
  ];
  // First alternative that matches at the current position, or peg$FAILED.
  const nextItem = () => {
    for (const alternative of alternatives) {
      const result = alternative();
      if (result !== peg$FAILED) {
        return result;
      }
    }
    return peg$FAILED;
  };
  let item = nextItem();
  if (item === peg$FAILED) {
    return peg$FAILED;
  }
  const items = [];
  while (item !== peg$FAILED) {
    items.push(item);
    item = nextItem();
  }
  return items;
}
// sequence = head:(cb_range / ... / context) tail:((c_sep / sequence_sep)? sequence_post)+
// Matches a head passage followed by one or more separator+passage steps and
// wraps them via peg$f1. Restores the input position and fails if either the
// head or the first tail step cannot be matched.
function peg$parsesequence() {
  const mark = peg$currPos;
  const headAlternatives = [
    peg$parsecb_range,
    peg$parsebcv_hyphen_range,
    peg$parserange,
    peg$parsenext_v,
    peg$parseff,
    peg$parsebcv_comma,
    peg$parsebc_title,
    peg$parseps151_bcv,
    peg$parsebcv,
    peg$parsebcv_weak,
    peg$parseps151_bc,
    peg$parsebc,
    peg$parsecv_psalm,
    peg$parsebv,
    peg$parsec_psalm,
    peg$parseb,
    peg$parsecbv,
    peg$parsecbv_ordinal,
    peg$parsecb,
    peg$parsecb_ordinal,
    peg$parsecontext
  ];
  let head = peg$FAILED;
  for (const alternative of headAlternatives) {
    head = alternative();
    if (head !== peg$FAILED) {
      break;
    }
  }
  if (head === peg$FAILED) {
    peg$currPos = mark;
    return peg$FAILED;
  }
  // One `(c_sep / sequence_sep)? sequence_post` step; the separator is
  // optional (null when absent), so only a missing sequence_post fails.
  const parseTailItem = () => {
    const itemMark = peg$currPos;
    let sep = peg$parsec_sep();
    if (sep === peg$FAILED) {
      sep = peg$parsesequence_sep();
      if (sep === peg$FAILED) {
        sep = null;
      }
    }
    const post = peg$parsesequence_post();
    if (post === peg$FAILED) {
      peg$currPos = itemMark;
      return peg$FAILED;
    }
    peg$savedPos = itemMark;
    return peg$f0(head, sep, post);
  };
  let tailItem = parseTailItem();
  if (tailItem === peg$FAILED) {
    peg$currPos = mark;
    return peg$FAILED;
  }
  const tail = [];
  while (tailItem !== peg$FAILED) {
    tail.push(tailItem);
    tailItem = parseTailItem();
  }
  peg$savedPos = mark;
  return peg$f1(head, tail);
}
// sequence_post_enclosed = "(" sp (c_sep / sequence_sep)? sequence_post
//                          ((c_sep / sequence_sep)? sequence_post)* sp ")"
// Parses a parenthesized sub-sequence and builds it via peg$f3; each extra
// item is built via peg$f2. Rewritten with guard clauses; the match/restore
// behavior is identical to the generated cascade.
function peg$parsesequence_post_enclosed() {
  const mark = peg$currPos;
  if (input.charCodeAt(peg$currPos) === 40) {
    peg$currPos++;
  } else {
    if (peg$silentFails === 0) {
      peg$fail(peg$e0);
    }
    peg$currPos = mark;
    return peg$FAILED;
  }
  peg$parsesp();
  // Leading separator is optional: null when neither form matches.
  let leadSep = peg$parsec_sep();
  if (leadSep === peg$FAILED) {
    leadSep = peg$parsesequence_sep();
    if (leadSep === peg$FAILED) {
      leadSep = null;
    }
  }
  const first = peg$parsesequence_post();
  if (first === peg$FAILED) {
    peg$currPos = mark;
    return peg$FAILED;
  }
  // Zero or more further `(sep? sequence_post)` items.
  const rest = [];
  for (;;) {
    const itemMark = peg$currPos;
    let itemSep = peg$parsec_sep();
    if (itemSep === peg$FAILED) {
      itemSep = peg$parsesequence_sep();
      if (itemSep === peg$FAILED) {
        itemSep = null;
      }
    }
    const post = peg$parsesequence_post();
    if (post === peg$FAILED) {
      peg$currPos = itemMark;
      break;
    }
    peg$savedPos = itemMark;
    rest.push(peg$f2(leadSep, first, itemSep, post));
  }
  peg$parsesp();
  if (input.charCodeAt(peg$currPos) === 41) {
    peg$currPos++;
  } else {
    if (peg$silentFails === 0) {
      peg$fail(peg$e1);
    }
    peg$currPos = mark;
    return peg$FAILED;
  }
  peg$savedPos = mark;
  return peg$f3(leadSep, first, rest);
}
// sequence_post = first matching alternative, tried in grammar order.
// (Replaces the generated if-cascade with a first-success loop; alternative
// order is identical, so behavior is unchanged.)
function peg$parsesequence_post() {
  const alternatives = [
    peg$parsesequence_post_enclosed,
    peg$parsecb_range,
    peg$parsebcv_hyphen_range,
    peg$parserange,
    peg$parsenext_v,
    peg$parseff,
    peg$parsebcv_comma,
    peg$parsebc_title,
    peg$parseps151_bcv,
    peg$parsebcv,
    peg$parsebcv_weak,
    peg$parseps151_bc,
    peg$parsebc,
    peg$parsecv_psalm,
    peg$parsebv,
    peg$parsec_psalm,
    peg$parseb,
    peg$parsecbv,
    peg$parsecbv_ordinal,
    peg$parsecb,
    peg$parsecb_ordinal,
    peg$parsec_title,
    peg$parseinteger_title,
    peg$parsecv,
    peg$parsecv_weak,
    peg$parseinteger,
    peg$parsec,
    peg$parsev
  ];
  for (const alternative of alternatives) {
    const result = alternative();
    if (result !== peg$FAILED) {
      return result;
    }
  }
  return peg$FAILED;
}
function peg$parserange() {
let s0, s1, s2, s3, s4, s5, s6;
s0 = peg$currPos;
s1 = peg$parsebcv_comma();
if (s1 === peg$FAILED) {
s1 = peg$parsebc_title();
if (s1 === peg$FAILED) {
s1 = peg$parseps151_bcv();
if (s1 === peg$FAILED) {
s1 = peg$parsebcv();
if (s1 === peg$FAILED) {
s1 = peg$parsebcv_weak();
if (s1 === peg$FAILED) {
s1 = peg$parseps151_bc();
if (s1 === peg$FAILED) {
s1 = peg$parsebc();
if (s1 === peg$FAILED) {
s1 = peg$parsecv_psalm();
if (s1 === peg$FAILED) {
s1 = peg$parsebv();
if (s1 === peg$FAILED) {
s1 = peg$currPos;
s2 = peg$parseb();
if (s2 !== peg$FAILED) {
s3 = peg$currPos;
peg$silentFails++;
s4 = peg$currPos;
s5 = peg$parserange_sep();
if (s5 !== peg$FAILED) {
s6 = peg$parsebcv_comma();
if (s6 === peg$FAILED) {
s6 = peg$parsebc_title();
if (s6 === peg$FAILED) {
s6 = peg$parseps151_bcv();
if (s6 === peg$FAILED) {
s6 = peg$parsebcv();
if (s6 === peg$FAILED) {
s6 = peg$parsebcv_weak();
if (s6 === peg$FAILED) {
s6 = peg$parseps151_bc();
if (s6 === peg$FAILED) {
s6 = peg$parsebc();
if (s6 === peg$FAILED) {
s6 = peg$parsebv();
if (s6 === peg$FAILED) {
s6 = peg$parseb();
}
}
}
}
}
}
}
}
if (s6 !== peg$FAILED) {
s5 = [s5, s6];
s4 = s5;
} else {
peg$currPos = s4;
s4 = peg$FAILED;
}
} else {
peg$currPos = s4;
s4 = peg$FAILED;
}
peg$silentFails--;
if (s4 !== peg$FAILED) {
peg$currPos = s3;
s3 = void 0;
} else {
s3 = peg$FAILED;
}
if (s3 !== peg$FAILED) {
s2 = [s2, s3];
s1 = s2;
} else {
peg$currPos = s1;
s1 = peg$FAILED;
}
} else {
peg$currPos = s1;
s1 = peg$FAILED;
}
if (s1 === peg$FAILED) {
s1 = peg$parsecbv();
if (s1 === peg$FAILED) {
s1 = peg$parsecbv_ordinal();
if (s1 === peg$FAILED) {
s1 = peg$parsec_psalm();
if (s1 === peg$FAILED) {
s1 = peg$parsecb();
if (s1 === peg$FAILED) {
s1 = peg$parsecb_ordinal();
if (s1 === peg$FAILED) {
s1 = peg$parsec_title();
if (s1 === peg$FAILED) {
s1 = peg$parseinteger_title();
if (s1 === peg$FAILED) {
s1 = peg$parsecv();
if (s1 === peg$FAILED) {
s1 = peg$parsecv_weak();
if (s1 === peg$FAILED) {
s1 = peg$parseinteger();
if (s1 === peg$FAILED) {
s1 = peg$parsec();
if (s1 === peg$FAILED) {
s1 = peg$parsev();
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
if (s1 !== peg$FAILED) {
s2 = peg$parserange_sep();
if (s2 !== peg$FAILED) {
s3 = peg$parsenext_v();
if (s3 === peg$FAILED) {
s3 = peg$parseff();
if (s3 === peg$FAILED) {
s3 = peg$parsebcv_comma();
if (s3 === peg$FAILED) {
s3 = peg$parsebc_title();
if (s3 === peg$FAILED) {
s3 = peg$parseps151_bcv();
if (s3 === peg$FAILED) {
s3 = peg$parsebcv();
if (s3 === peg$FAILED) {
s3 = peg$parsebcv_weak();
if (s3 === peg$FAILED) {
s3 = peg$parseps151_bc();
if (s3 === peg$FAILED) {
s3 = peg$parsebc();
if (s3 === peg$FAILED) {
s3 = peg$parsecv_psalm();
if (s3 === peg$FAILED) {
s3 = peg$parsebv();
if (s3 === peg$FAILED) {
s3 = peg$parseb();
if (s3 === peg$FAILED) {
s3 = peg$parsecbv();
if (s3 === peg$FAILED) {
s3 = peg$parsecbv_ordinal();
if (s3 === peg$FAILED) {
s3 = peg$parsec_psalm();
if (s3 === peg$FAILED) {
s3 = peg$parsecb();
if (s3 === peg$FAILED) {
s3 = peg$parsecb_ordinal();
if (s3 === peg$FAILED) {
s3 = peg$parsec_title();
if (s3 === peg$FAILED) {
s3 = peg$parseinteger_title();
if (s3 === peg$FAILED) {
s3 = peg$parsecv();
if (s3 === peg$FAILED) {
s3 = peg$parseinteger();
if (s3 === peg$FAILED) {
s3 = peg$parsecv_weak();
if (s3 === peg$FAILED) {
s3 = peg$parsec();
if (s3 === peg$FAILED) {
s3 = peg$parsev();
}
gitextract_krchq58f/ ├── .gitignore ├── LICENSE.md ├── Readme.md ├── bin/ │ ├── 01.add_lang.pl │ ├── add_cross_lang.pl │ ├── build_lang.sh │ ├── fuzz_lang.js │ ├── letters/ │ │ ├── blocks.txt │ │ └── letters.txt │ └── make_regexps.js ├── cjs/ │ ├── ar_bcv_parser.js │ ├── ascii_bcv_parser.js │ ├── bg_bcv_parser.js │ ├── ceb_bcv_parser.js │ ├── cs_bcv_parser.js │ ├── da_bcv_parser.js │ ├── de_bcv_parser.js │ ├── el_bcv_parser.js │ ├── en_bcv_parser.js │ ├── es_bcv_parser.js │ ├── fa_bcv_parser.js │ ├── fi_bcv_parser.js │ ├── fr_bcv_parser.js │ ├── full_bcv_parser.js │ ├── he_bcv_parser.js │ ├── hi_bcv_parser.js │ ├── hr_bcv_parser.js │ ├── ht_bcv_parser.js │ ├── hu_bcv_parser.js │ ├── id_bcv_parser.js │ ├── is_bcv_parser.js │ ├── it_bcv_parser.js │ ├── ja_bcv_parser.js │ ├── jv_bcv_parser.js │ ├── ko_bcv_parser.js │ ├── la_bcv_parser.js │ ├── mk_bcv_parser.js │ ├── mr_bcv_parser.js │ ├── ne_bcv_parser.js │ ├── nl_bcv_parser.js │ ├── no_bcv_parser.js │ ├── or_bcv_parser.js │ ├── pa_bcv_parser.js │ ├── package.json │ ├── pl_bcv_parser.js │ ├── pt_bcv_parser.js │ ├── ro_bcv_parser.js │ ├── ru_bcv_parser.js │ ├── sk_bcv_parser.js │ ├── so_bcv_parser.js │ ├── sq_bcv_parser.js │ ├── sr_bcv_parser.js │ ├── sv_bcv_parser.js │ ├── sw_bcv_parser.js │ ├── ta_bcv_parser.js │ ├── th_bcv_parser.js │ ├── tl_bcv_parser.js │ ├── tr_bcv_parser.js │ ├── uk_bcv_parser.js │ ├── ur_bcv_parser.js │ ├── vi_bcv_parser.js │ └── zh_bcv_parser.js ├── esm/ │ ├── bcv_parser.d.ts │ ├── bcv_parser.js │ └── lang/ │ ├── ar.d.ts │ ├── ar.js │ ├── ascii.d.ts │ ├── ascii.js │ ├── bg.d.ts │ ├── bg.js │ ├── ceb.d.ts │ ├── ceb.js │ ├── cs.d.ts │ ├── cs.js │ ├── da.d.ts │ ├── da.js │ ├── de.d.ts │ ├── de.js │ ├── el.d.ts │ ├── el.js │ ├── en.d.ts │ ├── en.js │ ├── es.d.ts │ ├── es.js │ ├── fa.d.ts │ ├── fa.js │ ├── fi.d.ts │ ├── fi.js │ ├── fr.d.ts │ ├── fr.js │ ├── full.d.ts │ ├── full.js │ ├── he.d.ts │ ├── he.js │ ├── hi.d.ts │ ├── hi.js │ ├── hr.d.ts │ ├── hr.js │ ├── ht.d.ts │ ├── ht.js │ ├── hu.d.ts │ 
├── hu.js │ ├── id.d.ts │ ├── id.js │ ├── is.d.ts │ ├── is.js │ ├── it.d.ts │ ├── it.js │ ├── ja.d.ts │ ├── ja.js │ ├── jv.d.ts │ ├── jv.js │ ├── ko.d.ts │ ├── ko.js │ ├── la.d.ts │ ├── la.js │ ├── mk.d.ts │ ├── mk.js │ ├── mr.d.ts │ ├── mr.js │ ├── ne.d.ts │ ├── ne.js │ ├── nl.d.ts │ ├── nl.js │ ├── no.d.ts │ ├── no.js │ ├── or.d.ts │ ├── or.js │ ├── pa.d.ts │ ├── pa.js │ ├── pl.d.ts │ ├── pl.js │ ├── pt.d.ts │ ├── pt.js │ ├── ro.d.ts │ ├── ro.js │ ├── ru.d.ts │ ├── ru.js │ ├── sk.d.ts │ ├── sk.js │ ├── so.d.ts │ ├── so.js │ ├── sq.d.ts │ ├── sq.js │ ├── sr.d.ts │ ├── sr.js │ ├── sv.d.ts │ ├── sv.js │ ├── sw.d.ts │ ├── sw.js │ ├── ta.d.ts │ ├── ta.js │ ├── th.d.ts │ ├── th.js │ ├── tl.d.ts │ ├── tl.js │ ├── tr.d.ts │ ├── tr.js │ ├── uk.d.ts │ ├── uk.js │ ├── ur.d.ts │ ├── ur.js │ ├── vi.d.ts │ ├── vi.js │ ├── zh.d.ts │ └── zh.js ├── js/ │ ├── ar_bcv_parser.js │ ├── ascii_bcv_parser.js │ ├── bg_bcv_parser.js │ ├── ceb_bcv_parser.js │ ├── cs_bcv_parser.js │ ├── da_bcv_parser.js │ ├── de_bcv_parser.js │ ├── el_bcv_parser.js │ ├── en_bcv_parser.js │ ├── es_bcv_parser.js │ ├── fi_bcv_parser.js │ ├── fr_bcv_parser.js │ ├── full_bcv_parser.js │ ├── he_bcv_parser.js │ ├── hi_bcv_parser.js │ ├── hr_bcv_parser.js │ ├── ht_bcv_parser.js │ ├── hu_bcv_parser.js │ ├── is_bcv_parser.js │ ├── it_bcv_parser.js │ ├── ja_bcv_parser.js │ ├── jv_bcv_parser.js │ ├── ko_bcv_parser.js │ ├── la_bcv_parser.js │ ├── mk_bcv_parser.js │ ├── mr_bcv_parser.js │ ├── ne_bcv_parser.js │ ├── nl_bcv_parser.js │ ├── no_bcv_parser.js │ ├── or_bcv_parser.js │ ├── pa_bcv_parser.js │ ├── package.json │ ├── pl_bcv_parser.js │ ├── pt_bcv_parser.js │ ├── ro_bcv_parser.js │ ├── ru_bcv_parser.js │ ├── sk_bcv_parser.js │ ├── so_bcv_parser.js │ ├── sq_bcv_parser.js │ ├── sr_bcv_parser.js │ ├── sv_bcv_parser.js │ ├── sw_bcv_parser.js │ ├── ta_bcv_parser.js │ ├── th_bcv_parser.js │ ├── tl_bcv_parser.js │ ├── tr_bcv_parser.js │ ├── uk_bcv_parser.js │ ├── ur_bcv_parser.js │ ├── vi_bcv_parser.js │ └── 
zh_bcv_parser.js ├── package.json ├── src/ │ ├── ar/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── ascii/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── bg/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── ceb/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── core/ │ │ ├── bcv_grammar.pegjs │ │ ├── bcv_matcher.ts │ │ ├── bcv_options.ts │ │ ├── bcv_parser.ts │ │ ├── bcv_passage.ts │ │ ├── bcv_regexps_manager.ts │ │ ├── bcv_translations_manager.ts │ │ ├── lang.d.ts │ │ ├── lang_bundle.ts │ │ ├── lang_grammar_options.ts │ │ ├── lang_regexps.ts │ │ ├── lang_spec.js │ │ ├── lang_specrunner.html │ │ ├── lang_translations.ts │ │ ├── peg_plugin.js │ │ └── types.d.ts │ ├── cs/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── da/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── de/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── el/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── en/ │ │ ├── book_names.txt │ │ ├── data.txt │ │ ├── psalm_cb.js │ │ └── translation_additions.js │ ├── es/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── fa/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── fi/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── fr/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── full/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── he/ │ │ ├── book_names.txt │ │ ├── data.txt │ │ ├── spec_additions.js │ │ └── translation_additions.js │ ├── hi/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── hr/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── ht/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── hu/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── id/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── is/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── it/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── ja/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── jv/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── ko/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── la/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── mk/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── mr/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── ne/ │ │ ├── 
book_names.txt │ │ └── data.txt │ ├── nl/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── no/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── or/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── pa/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── pl/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── pt/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── ro/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── ru/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── sk/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── so/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── sq/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── sr/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── sv/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── sw/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── ta/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── template/ │ │ ├── SpecRunner.html │ │ ├── data.txt │ │ ├── grammar.pegjs │ │ ├── regexps.coffee │ │ ├── research.xlsx │ │ ├── spec.coffee │ │ └── translations.coffee │ ├── th/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── tl/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── tr/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── uk/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── ur/ │ │ ├── book_names.txt │ │ └── data.txt │ ├── vi/ │ │ ├── book_names.txt │ │ └── data.txt │ └── zh/ │ ├── book_names.txt │ └── data.txt ├── test/ │ ├── apocrypha.spec.js │ ├── cjs.spec.cjs │ ├── compaction.spec.js │ ├── docs.spec.js │ ├── existence.spec.js │ ├── html/ │ │ ├── ar.html │ │ ├── ascii.html │ │ ├── bg.html │ │ ├── ceb.html │ │ ├── cs.html │ │ ├── da.html │ │ ├── de.html │ │ ├── el.html │ │ ├── en.html │ │ ├── es.html │ │ ├── fa.html │ │ ├── fi.html │ │ ├── fr.html │ │ ├── full.html │ │ ├── he.html │ │ ├── hi.html │ │ ├── hr.html │ │ ├── ht.html │ │ ├── hu.html │ │ ├── id.html │ │ ├── is.html │ │ ├── it.html │ │ ├── ja.html │ │ ├── jv.html │ │ ├── ko.html │ │ ├── la.html │ │ ├── mk.html │ │ ├── mr.html │ │ ├── ne.html │ │ ├── nl.html │ │ ├── no.html │ │ ├── or.html │ │ ├── pa.html │ │ ├── pl.html │ │ ├── pt.html 
│ │ ├── ro.html │ │ ├── ru.html │ │ ├── sk.html │ │ ├── so.html │ │ ├── sq.html │ │ ├── sr.html │ │ ├── sv.html │ │ ├── sw.html │ │ ├── ta.html │ │ ├── th.html │ │ ├── tl.html │ │ ├── tr.html │ │ ├── uk.html │ │ ├── ur.html │ │ ├── vi.html │ │ └── zh.html │ ├── lang/ │ │ ├── ar.spec.js │ │ ├── ascii.spec.js │ │ ├── bg.spec.js │ │ ├── ceb.spec.js │ │ ├── cs.spec.js │ │ ├── da.spec.js │ │ ├── de.spec.js │ │ ├── el.spec.js │ │ ├── en.spec.js │ │ ├── es.spec.js │ │ ├── fa.spec.js │ │ ├── fi.spec.js │ │ ├── fr.spec.js │ │ ├── full.spec.js │ │ ├── he.spec.js │ │ ├── hi.spec.js │ │ ├── hr.spec.js │ │ ├── ht.spec.js │ │ ├── hu.spec.js │ │ ├── id.spec.js │ │ ├── is.spec.js │ │ ├── it.spec.js │ │ ├── ja.spec.js │ │ ├── jv.spec.js │ │ ├── ko.spec.js │ │ ├── la.spec.js │ │ ├── mk.spec.js │ │ ├── mr.spec.js │ │ ├── ne.spec.js │ │ ├── nl.spec.js │ │ ├── no.spec.js │ │ ├── or.spec.js │ │ ├── pa.spec.js │ │ ├── pl.spec.js │ │ ├── pt.spec.js │ │ ├── ro.spec.js │ │ ├── ru.spec.js │ │ ├── sk.spec.js │ │ ├── so.spec.js │ │ ├── sq.spec.js │ │ ├── sr.spec.js │ │ ├── sv.spec.js │ │ ├── sw.spec.js │ │ ├── ta.spec.js │ │ ├── th.spec.js │ │ ├── tl.spec.js │ │ ├── tr.spec.js │ │ ├── uk.spec.js │ │ ├── ur.spec.js │ │ ├── vi.spec.js │ │ └── zh.spec.js │ ├── parse.spec.js │ ├── preparse.spec.js │ ├── realworld.spec.js │ ├── regexps.spec.js │ └── translations.spec.js └── tsconfig.json
Showing preview only (563K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (7476 symbols across 168 files)
FILE: bin/fuzz_lang.js
function get_abbrevs (line 10) | function get_abbrevs(lang) {
function get_translations (line 22) | function get_translations(lang) {
function get_options (line 26) | function get_options() {
function create_options (line 54) | function create_options(keys) {
function get_random_item_from_array (line 62) | function get_random_item_from_array(items) {
function build_text (line 66) | function build_text(keys) {
function make_token (line 79) | function make_token(type) {
function build_nested_string (line 95) | function build_nested_string(text) {
FILE: cjs/ar_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5511) | constructor() {
method constructor (line 5909) | constructor(message, expected, found, location) {
method format (line 5916) | format(sources) {
method buildMessage (line 5940) | static buildMessage(expected, found) {
function peg$parse2 (line 6008) | function peg$parse2(input, options) {
method constructor (line 9118) | constructor(lang = null) {
method parse (line 9143) | parse(string_to_parse) {
method parse_with_context (line 9152) | parse_with_context(string_to_parse, context_string) {
method reset (line 9171) | reset() {
method set_options (line 9178) | set_options(options) {
method include_apocrypha (line 9191) | include_apocrypha(arg) {
method translation_info (line 9206) | translation_info(translation = "default") {
method osis (line 9211) | osis() {
method osis_and_translations (line 9221) | osis_and_translations() {
method osis_and_indices (line 9231) | osis_and_indices() {
method parsed_entities (line 9245) | parsed_entities() {
method parse_entity_passages (line 9302) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9346) | to_osis(start, end, translation) {
method fix_ps151 (line 9411) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9445) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9483) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9499) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9518) | snap_range(entity, passage_i) {
method snap_sequence (line 9565) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9577) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9589) | starts_with_book(passage) {
method remove_absolute_indices (line 9599) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9613) | add_books(books) {
method add_translations (line 9616) | add_translations(translations) {
FILE: cjs/ascii_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5625) | constructor() {
method constructor (line 6040) | constructor(message, expected, found, location) {
method format (line 6047) | format(sources) {
method buildMessage (line 6071) | static buildMessage(expected, found) {
function peg$parse2 (line 6139) | function peg$parse2(input, options) {
method constructor (line 9249) | constructor(lang = null) {
method parse (line 9274) | parse(string_to_parse) {
method parse_with_context (line 9283) | parse_with_context(string_to_parse, context_string) {
method reset (line 9302) | reset() {
method set_options (line 9309) | set_options(options) {
method include_apocrypha (line 9322) | include_apocrypha(arg) {
method translation_info (line 9337) | translation_info(translation = "default") {
method osis (line 9342) | osis() {
method osis_and_translations (line 9352) | osis_and_translations() {
method osis_and_indices (line 9362) | osis_and_indices() {
method parsed_entities (line 9376) | parsed_entities() {
method parse_entity_passages (line 9433) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9477) | to_osis(start, end, translation) {
method fix_ps151 (line 9542) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9576) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9614) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9630) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9649) | snap_range(entity, passage_i) {
method snap_sequence (line 9696) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9708) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9720) | starts_with_book(passage) {
method remove_absolute_indices (line 9730) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9744) | add_books(books) {
method add_translations (line 9747) | add_translations(translations) {
FILE: cjs/bg_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5521) | constructor() {
method constructor (line 5919) | constructor(message, expected, found, location) {
method format (line 5926) | format(sources) {
method buildMessage (line 5950) | static buildMessage(expected, found) {
function peg$parse2 (line 6018) | function peg$parse2(input, options) {
method constructor (line 9128) | constructor(lang = null) {
method parse (line 9153) | parse(string_to_parse) {
method parse_with_context (line 9162) | parse_with_context(string_to_parse, context_string) {
method reset (line 9181) | reset() {
method set_options (line 9188) | set_options(options) {
method include_apocrypha (line 9201) | include_apocrypha(arg) {
method translation_info (line 9216) | translation_info(translation = "default") {
method osis (line 9221) | osis() {
method osis_and_translations (line 9231) | osis_and_translations() {
method osis_and_indices (line 9241) | osis_and_indices() {
method parsed_entities (line 9255) | parsed_entities() {
method parse_entity_passages (line 9312) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9356) | to_osis(start, end, translation) {
method fix_ps151 (line 9421) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9455) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9493) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9509) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9528) | snap_range(entity, passage_i) {
method snap_sequence (line 9575) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9587) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9599) | starts_with_book(passage) {
method remove_absolute_indices (line 9609) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9623) | add_books(books) {
method add_translations (line 9626) | add_translations(translations) {
FILE: cjs/ceb_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5511) | constructor() {
method constructor (line 5909) | constructor(message, expected, found, location) {
method format (line 5916) | format(sources) {
method buildMessage (line 5940) | static buildMessage(expected, found) {
function peg$parse2 (line 6008) | function peg$parse2(input, options) {
method constructor (line 9118) | constructor(lang = null) {
method parse (line 9143) | parse(string_to_parse) {
method parse_with_context (line 9152) | parse_with_context(string_to_parse, context_string) {
method reset (line 9171) | reset() {
method set_options (line 9178) | set_options(options) {
method include_apocrypha (line 9191) | include_apocrypha(arg) {
method translation_info (line 9206) | translation_info(translation = "default") {
method osis (line 9211) | osis() {
method osis_and_translations (line 9221) | osis_and_translations() {
method osis_and_indices (line 9231) | osis_and_indices() {
method parsed_entities (line 9245) | parsed_entities() {
method parse_entity_passages (line 9302) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9346) | to_osis(start, end, translation) {
method fix_ps151 (line 9411) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9445) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9483) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9499) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9518) | snap_range(entity, passage_i) {
method snap_sequence (line 9565) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9577) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9589) | starts_with_book(passage) {
method remove_absolute_indices (line 9599) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9613) | add_books(books) {
method add_translations (line 9616) | add_translations(translations) {
FILE: cjs/cs_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5511) | constructor() {
method constructor (line 5909) | constructor(message, expected, found, location) {
method format (line 5916) | format(sources) {
method buildMessage (line 5940) | static buildMessage(expected, found) {
function peg$parse2 (line 6008) | function peg$parse2(input, options) {
method constructor (line 9118) | constructor(lang = null) {
method parse (line 9143) | parse(string_to_parse) {
method parse_with_context (line 9152) | parse_with_context(string_to_parse, context_string) {
method reset (line 9171) | reset() {
method set_options (line 9178) | set_options(options) {
method include_apocrypha (line 9191) | include_apocrypha(arg) {
method translation_info (line 9206) | translation_info(translation = "default") {
method osis (line 9211) | osis() {
method osis_and_translations (line 9221) | osis_and_translations() {
method osis_and_indices (line 9231) | osis_and_indices() {
method parsed_entities (line 9245) | parsed_entities() {
method parse_entity_passages (line 9302) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9346) | to_osis(start, end, translation) {
method fix_ps151 (line 9411) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9445) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9483) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9499) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9518) | snap_range(entity, passage_i) {
method snap_sequence (line 9565) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9577) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9589) | starts_with_book(passage) {
method remove_absolute_indices (line 9599) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9613) | add_books(books) {
method add_translations (line 9616) | add_translations(translations) {
FILE: cjs/da_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5511) | constructor() {
method constructor (line 5909) | constructor(message, expected, found, location) {
method format (line 5916) | format(sources) {
method buildMessage (line 5940) | static buildMessage(expected, found) {
function peg$parse2 (line 6008) | function peg$parse2(input, options) {
method constructor (line 9118) | constructor(lang = null) {
method parse (line 9143) | parse(string_to_parse) {
method parse_with_context (line 9152) | parse_with_context(string_to_parse, context_string) {
method reset (line 9171) | reset() {
method set_options (line 9178) | set_options(options) {
method include_apocrypha (line 9191) | include_apocrypha(arg) {
method translation_info (line 9206) | translation_info(translation = "default") {
method osis (line 9211) | osis() {
method osis_and_translations (line 9221) | osis_and_translations() {
method osis_and_indices (line 9231) | osis_and_indices() {
method parsed_entities (line 9245) | parsed_entities() {
method parse_entity_passages (line 9302) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9346) | to_osis(start, end, translation) {
method fix_ps151 (line 9411) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9445) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9483) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9499) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9518) | snap_range(entity, passage_i) {
method snap_sequence (line 9565) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9577) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9589) | starts_with_book(passage) {
method remove_absolute_indices (line 9599) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9613) | add_books(books) {
method add_translations (line 9616) | add_translations(translations) {
FILE: cjs/de_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5511) | constructor() {
method constructor (line 5909) | constructor(message, expected, found, location) {
method format (line 5916) | format(sources) {
method buildMessage (line 5940) | static buildMessage(expected, found) {
function peg$parse2 (line 6008) | function peg$parse2(input, options) {
method constructor (line 9118) | constructor(lang = null) {
method parse (line 9143) | parse(string_to_parse) {
method parse_with_context (line 9152) | parse_with_context(string_to_parse, context_string) {
method reset (line 9171) | reset() {
method set_options (line 9178) | set_options(options) {
method include_apocrypha (line 9191) | include_apocrypha(arg) {
method translation_info (line 9206) | translation_info(translation = "default") {
method osis (line 9211) | osis() {
method osis_and_translations (line 9221) | osis_and_translations() {
method osis_and_indices (line 9231) | osis_and_indices() {
method parsed_entities (line 9245) | parsed_entities() {
method parse_entity_passages (line 9302) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9346) | to_osis(start, end, translation) {
method fix_ps151 (line 9411) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9445) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9483) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9499) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9518) | snap_range(entity, passage_i) {
method snap_sequence (line 9565) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9577) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9589) | starts_with_book(passage) {
method remove_absolute_indices (line 9599) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9613) | add_books(books) {
method add_translations (line 9616) | add_translations(translations) {
FILE: cjs/el_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5511) | constructor() {
method constructor (line 5909) | constructor(message, expected, found, location) {
method format (line 5916) | format(sources) {
method buildMessage (line 5940) | static buildMessage(expected, found) {
function peg$parse2 (line 6008) | function peg$parse2(input, options) {
method constructor (line 9118) | constructor(lang = null) {
method parse (line 9143) | parse(string_to_parse) {
method parse_with_context (line 9152) | parse_with_context(string_to_parse, context_string) {
method reset (line 9171) | reset() {
method set_options (line 9178) | set_options(options) {
method include_apocrypha (line 9191) | include_apocrypha(arg) {
method translation_info (line 9206) | translation_info(translation = "default") {
method osis (line 9211) | osis() {
method osis_and_translations (line 9221) | osis_and_translations() {
method osis_and_indices (line 9231) | osis_and_indices() {
method parsed_entities (line 9245) | parsed_entities() {
method parse_entity_passages (line 9302) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9346) | to_osis(start, end, translation) {
method fix_ps151 (line 9411) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9445) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9483) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9499) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9518) | snap_range(entity, passage_i) {
method snap_sequence (line 9565) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9577) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9589) | starts_with_book(passage) {
method remove_absolute_indices (line 9599) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9613) | add_books(books) {
method add_translations (line 9616) | add_translations(translations) {
FILE: cjs/en_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5564) | constructor() {
method constructor (line 5979) | constructor(message, expected, found, location) {
method format (line 5986) | format(sources) {
method buildMessage (line 6010) | static buildMessage(expected, found) {
function peg$parse2 (line 6078) | function peg$parse2(input, options) {
method constructor (line 9188) | constructor(lang = null) {
method parse (line 9213) | parse(string_to_parse) {
method parse_with_context (line 9222) | parse_with_context(string_to_parse, context_string) {
method reset (line 9241) | reset() {
method set_options (line 9248) | set_options(options) {
method include_apocrypha (line 9261) | include_apocrypha(arg) {
method translation_info (line 9276) | translation_info(translation = "default") {
method osis (line 9281) | osis() {
method osis_and_translations (line 9291) | osis_and_translations() {
method osis_and_indices (line 9301) | osis_and_indices() {
method parsed_entities (line 9315) | parsed_entities() {
method parse_entity_passages (line 9372) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9416) | to_osis(start, end, translation) {
method fix_ps151 (line 9481) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9515) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9553) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9569) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9588) | snap_range(entity, passage_i) {
method snap_sequence (line 9635) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9647) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9659) | starts_with_book(passage) {
method remove_absolute_indices (line 9669) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9683) | add_books(books) {
method add_translations (line 9686) | add_translations(translations) {
FILE: cjs/es_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5544) | constructor() {
method constructor (line 5942) | constructor(message, expected, found, location) {
method format (line 5949) | format(sources) {
method buildMessage (line 5973) | static buildMessage(expected, found) {
function peg$parse2 (line 6041) | function peg$parse2(input, options) {
method constructor (line 9151) | constructor(lang = null) {
method parse (line 9176) | parse(string_to_parse) {
method parse_with_context (line 9185) | parse_with_context(string_to_parse, context_string) {
method reset (line 9204) | reset() {
method set_options (line 9211) | set_options(options) {
method include_apocrypha (line 9224) | include_apocrypha(arg) {
method translation_info (line 9239) | translation_info(translation = "default") {
method osis (line 9244) | osis() {
method osis_and_translations (line 9254) | osis_and_translations() {
method osis_and_indices (line 9264) | osis_and_indices() {
method parsed_entities (line 9278) | parsed_entities() {
method parse_entity_passages (line 9335) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9379) | to_osis(start, end, translation) {
method fix_ps151 (line 9444) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9478) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9516) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9532) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9551) | snap_range(entity, passage_i) {
method snap_sequence (line 9598) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9610) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9622) | starts_with_book(passage) {
method remove_absolute_indices (line 9632) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9646) | add_books(books) {
method add_translations (line 9649) | add_translations(translations) {
FILE: cjs/fa_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5511) | constructor() {
method constructor (line 5909) | constructor(message, expected, found, location) {
method format (line 5916) | format(sources) {
method buildMessage (line 5940) | static buildMessage(expected, found) {
function peg$parse2 (line 6008) | function peg$parse2(input, options) {
method constructor (line 9118) | constructor(lang = null) {
method parse (line 9143) | parse(string_to_parse) {
method parse_with_context (line 9152) | parse_with_context(string_to_parse, context_string) {
method reset (line 9171) | reset() {
method set_options (line 9178) | set_options(options) {
method include_apocrypha (line 9191) | include_apocrypha(arg) {
method translation_info (line 9206) | translation_info(translation = "default") {
method osis (line 9211) | osis() {
method osis_and_translations (line 9221) | osis_and_translations() {
method osis_and_indices (line 9231) | osis_and_indices() {
method parsed_entities (line 9245) | parsed_entities() {
method parse_entity_passages (line 9302) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9346) | to_osis(start, end, translation) {
method fix_ps151 (line 9411) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9445) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9483) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9499) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9518) | snap_range(entity, passage_i) {
method snap_sequence (line 9565) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9577) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9589) | starts_with_book(passage) {
method remove_absolute_indices (line 9599) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9613) | add_books(books) {
method add_translations (line 9616) | add_translations(translations) {
FILE: cjs/fi_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5511) | constructor() {
method constructor (line 5909) | constructor(message, expected, found, location) {
method format (line 5916) | format(sources) {
method buildMessage (line 5940) | static buildMessage(expected, found) {
function peg$parse2 (line 6008) | function peg$parse2(input, options) {
method constructor (line 9118) | constructor(lang = null) {
method parse (line 9143) | parse(string_to_parse) {
method parse_with_context (line 9152) | parse_with_context(string_to_parse, context_string) {
method reset (line 9171) | reset() {
method set_options (line 9178) | set_options(options) {
method include_apocrypha (line 9191) | include_apocrypha(arg) {
method translation_info (line 9206) | translation_info(translation = "default") {
method osis (line 9211) | osis() {
method osis_and_translations (line 9221) | osis_and_translations() {
method osis_and_indices (line 9231) | osis_and_indices() {
method parsed_entities (line 9245) | parsed_entities() {
method parse_entity_passages (line 9302) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9346) | to_osis(start, end, translation) {
method fix_ps151 (line 9411) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9445) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9483) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9499) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9518) | snap_range(entity, passage_i) {
method snap_sequence (line 9565) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9577) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9589) | starts_with_book(passage) {
method remove_absolute_indices (line 9599) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9613) | add_books(books) {
method add_translations (line 9616) | add_translations(translations) {
FILE: cjs/fr_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5527) | constructor() {
method constructor (line 5925) | constructor(message, expected, found, location) {
method format (line 5932) | format(sources) {
method buildMessage (line 5956) | static buildMessage(expected, found) {
function peg$parse2 (line 6024) | function peg$parse2(input, options) {
method constructor (line 9134) | constructor(lang = null) {
method parse (line 9159) | parse(string_to_parse) {
method parse_with_context (line 9168) | parse_with_context(string_to_parse, context_string) {
method reset (line 9187) | reset() {
method set_options (line 9194) | set_options(options) {
method include_apocrypha (line 9207) | include_apocrypha(arg) {
method translation_info (line 9222) | translation_info(translation = "default") {
method osis (line 9227) | osis() {
method osis_and_translations (line 9237) | osis_and_translations() {
method osis_and_indices (line 9247) | osis_and_indices() {
method parsed_entities (line 9261) | parsed_entities() {
method parse_entity_passages (line 9318) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9362) | to_osis(start, end, translation) {
method fix_ps151 (line 9427) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9461) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9499) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9515) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9534) | snap_range(entity, passage_i) {
method snap_sequence (line 9581) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9593) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9605) | starts_with_book(passage) {
method remove_absolute_indices (line 9615) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9629) | add_books(books) {
method add_translations (line 9632) | add_translations(translations) {
FILE: cjs/full_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5651) | constructor() {
method constructor (line 6066) | constructor(message, expected, found, location) {
method format (line 6073) | format(sources) {
method buildMessage (line 6097) | static buildMessage(expected, found) {
function peg$parse2 (line 6165) | function peg$parse2(input, options) {
method constructor (line 9275) | constructor(lang = null) {
method parse (line 9300) | parse(string_to_parse) {
method parse_with_context (line 9309) | parse_with_context(string_to_parse, context_string) {
method reset (line 9328) | reset() {
method set_options (line 9335) | set_options(options) {
method include_apocrypha (line 9348) | include_apocrypha(arg) {
method translation_info (line 9363) | translation_info(translation = "default") {
method osis (line 9368) | osis() {
method osis_and_translations (line 9378) | osis_and_translations() {
method osis_and_indices (line 9388) | osis_and_indices() {
method parsed_entities (line 9402) | parsed_entities() {
method parse_entity_passages (line 9459) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9503) | to_osis(start, end, translation) {
method fix_ps151 (line 9568) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9602) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9640) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9656) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9675) | snap_range(entity, passage_i) {
method snap_sequence (line 9722) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9734) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9746) | starts_with_book(passage) {
method remove_absolute_indices (line 9756) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9770) | add_books(books) {
method add_translations (line 9773) | add_translations(translations) {
FILE: cjs/he_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5511) | constructor() {
method constructor (line 5860) | constructor(message, expected, found, location) {
method format (line 5867) | format(sources) {
method buildMessage (line 5891) | static buildMessage(expected, found) {
function peg$parse2 (line 5959) | function peg$parse2(input, options) {
method constructor (line 9069) | constructor(lang = null) {
method parse (line 9094) | parse(string_to_parse) {
method parse_with_context (line 9103) | parse_with_context(string_to_parse, context_string) {
method reset (line 9122) | reset() {
method set_options (line 9129) | set_options(options) {
method include_apocrypha (line 9142) | include_apocrypha(arg) {
method translation_info (line 9157) | translation_info(translation = "default") {
method osis (line 9162) | osis() {
method osis_and_translations (line 9172) | osis_and_translations() {
method osis_and_indices (line 9182) | osis_and_indices() {
method parsed_entities (line 9196) | parsed_entities() {
method parse_entity_passages (line 9253) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9297) | to_osis(start, end, translation) {
method fix_ps151 (line 9362) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9396) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9434) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9450) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9469) | snap_range(entity, passage_i) {
method snap_sequence (line 9516) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9528) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9540) | starts_with_book(passage) {
method remove_absolute_indices (line 9550) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9564) | add_books(books) {
method add_translations (line 9567) | add_translations(translations) {
FILE: cjs/hi_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5511) | constructor() {
method constructor (line 5909) | constructor(message, expected, found, location) {
method format (line 5916) | format(sources) {
method buildMessage (line 5940) | static buildMessage(expected, found) {
function peg$parse2 (line 6008) | function peg$parse2(input, options) {
method constructor (line 9118) | constructor(lang = null) {
method parse (line 9143) | parse(string_to_parse) {
method parse_with_context (line 9152) | parse_with_context(string_to_parse, context_string) {
method reset (line 9171) | reset() {
method set_options (line 9178) | set_options(options) {
method include_apocrypha (line 9191) | include_apocrypha(arg) {
method translation_info (line 9206) | translation_info(translation = "default") {
method osis (line 9211) | osis() {
method osis_and_translations (line 9221) | osis_and_translations() {
method osis_and_indices (line 9231) | osis_and_indices() {
method parsed_entities (line 9245) | parsed_entities() {
method parse_entity_passages (line 9302) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9346) | to_osis(start, end, translation) {
method fix_ps151 (line 9411) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9445) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9483) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9499) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9518) | snap_range(entity, passage_i) {
method snap_sequence (line 9565) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9577) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9589) | starts_with_book(passage) {
method remove_absolute_indices (line 9599) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9613) | add_books(books) {
method add_translations (line 9616) | add_translations(translations) {
FILE: cjs/hr_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5511) | constructor() {
method constructor (line 5909) | constructor(message, expected, found, location) {
method format (line 5916) | format(sources) {
method buildMessage (line 5940) | static buildMessage(expected, found) {
function peg$parse2 (line 6008) | function peg$parse2(input, options) {
method constructor (line 9118) | constructor(lang = null) {
method parse (line 9143) | parse(string_to_parse) {
method parse_with_context (line 9152) | parse_with_context(string_to_parse, context_string) {
method reset (line 9171) | reset() {
method set_options (line 9178) | set_options(options) {
method include_apocrypha (line 9191) | include_apocrypha(arg) {
method translation_info (line 9206) | translation_info(translation = "default") {
method osis (line 9211) | osis() {
method osis_and_translations (line 9221) | osis_and_translations() {
method osis_and_indices (line 9231) | osis_and_indices() {
method parsed_entities (line 9245) | parsed_entities() {
method parse_entity_passages (line 9302) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9346) | to_osis(start, end, translation) {
method fix_ps151 (line 9411) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9445) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9483) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9499) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9518) | snap_range(entity, passage_i) {
method snap_sequence (line 9565) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9577) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9589) | starts_with_book(passage) {
method remove_absolute_indices (line 9599) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9613) | add_books(books) {
method add_translations (line 9616) | add_translations(translations) {
FILE: cjs/ht_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5511) | constructor() {
method constructor (line 5909) | constructor(message, expected, found, location) {
method format (line 5916) | format(sources) {
method buildMessage (line 5940) | static buildMessage(expected, found) {
function peg$parse2 (line 6008) | function peg$parse2(input, options) {
method constructor (line 9118) | constructor(lang = null) {
method parse (line 9143) | parse(string_to_parse) {
method parse_with_context (line 9152) | parse_with_context(string_to_parse, context_string) {
method reset (line 9171) | reset() {
method set_options (line 9178) | set_options(options) {
method include_apocrypha (line 9191) | include_apocrypha(arg) {
method translation_info (line 9206) | translation_info(translation = "default") {
method osis (line 9211) | osis() {
method osis_and_translations (line 9221) | osis_and_translations() {
method osis_and_indices (line 9231) | osis_and_indices() {
method parsed_entities (line 9245) | parsed_entities() {
method parse_entity_passages (line 9302) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9346) | to_osis(start, end, translation) {
method fix_ps151 (line 9411) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9445) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9483) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9499) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9518) | snap_range(entity, passage_i) {
method snap_sequence (line 9565) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9577) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9589) | starts_with_book(passage) {
method remove_absolute_indices (line 9599) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9613) | add_books(books) {
method add_translations (line 9616) | add_translations(translations) {
FILE: cjs/hu_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5511) | constructor() {
method constructor (line 5909) | constructor(message, expected, found, location) {
method format (line 5916) | format(sources) {
method buildMessage (line 5940) | static buildMessage(expected, found) {
function peg$parse2 (line 6008) | function peg$parse2(input, options) {
method constructor (line 9118) | constructor(lang = null) {
method parse (line 9143) | parse(string_to_parse) {
method parse_with_context (line 9152) | parse_with_context(string_to_parse, context_string) {
method reset (line 9171) | reset() {
method set_options (line 9178) | set_options(options) {
method include_apocrypha (line 9191) | include_apocrypha(arg) {
method translation_info (line 9206) | translation_info(translation = "default") {
method osis (line 9211) | osis() {
method osis_and_translations (line 9221) | osis_and_translations() {
method osis_and_indices (line 9231) | osis_and_indices() {
method parsed_entities (line 9245) | parsed_entities() {
method parse_entity_passages (line 9302) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9346) | to_osis(start, end, translation) {
method fix_ps151 (line 9411) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9445) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9483) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9499) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9518) | snap_range(entity, passage_i) {
method snap_sequence (line 9565) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9577) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9589) | starts_with_book(passage) {
method remove_absolute_indices (line 9599) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9613) | add_books(books) {
method add_translations (line 9616) | add_translations(translations) {
FILE: cjs/id_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5511) | constructor() {
method constructor (line 5909) | constructor(message, expected, found, location) {
method format (line 5916) | format(sources) {
method buildMessage (line 5940) | static buildMessage(expected, found) {
function peg$parse2 (line 6008) | function peg$parse2(input, options) {
method constructor (line 9118) | constructor(lang = null) {
method parse (line 9143) | parse(string_to_parse) {
method parse_with_context (line 9152) | parse_with_context(string_to_parse, context_string) {
method reset (line 9171) | reset() {
method set_options (line 9178) | set_options(options) {
method include_apocrypha (line 9191) | include_apocrypha(arg) {
method translation_info (line 9206) | translation_info(translation = "default") {
method osis (line 9211) | osis() {
method osis_and_translations (line 9221) | osis_and_translations() {
method osis_and_indices (line 9231) | osis_and_indices() {
method parsed_entities (line 9245) | parsed_entities() {
method parse_entity_passages (line 9302) | parse_entity_passages(entity, entity_id, translations, system) {
method to_osis (line 9346) | to_osis(start, end, translation) {
method fix_ps151 (line 9411) | fix_ps151(start, end, translation) {
method combine_consecutive_passages (line 9445) | combine_consecutive_passages(osises, translation) {
method snap_enclosed_indices (line 9483) | snap_enclosed_indices(osises) {
method is_verse_consecutive (line 9499) | is_verse_consecutive(prev, check, translation) {
method snap_range (line 9518) | snap_range(entity, passage_i) {
method snap_sequence (line 9565) | snap_sequence(type, entity, osises, i, length) {
method get_snap_sequence_i (line 9577) | get_snap_sequence_i(passages, passage_i, length) {
method starts_with_book (line 9589) | starts_with_book(passage) {
method remove_absolute_indices (line 9599) | remove_absolute_indices(passages, passage_i) {
method add_books (line 9613) | add_books(books) {
method add_translations (line 9616) | add_translations(translations) {
FILE: cjs/is_bcv_parser.js
method constructor (line 30) | constructor(message, expected, found, location) {
method format (line 37) | format(sources) {
method buildMessage (line 61) | static buildMessage(expected, found) {
function peg$parse (line 129) | function peg$parse(input, options) {
method constructor (line 3241) | constructor(parent, grammar_options2) {
method replace_control_characters (line 3247) | replace_control_characters(s) {
method replace_non_ascii_numbers (line 3251) | replace_non_ascii_numbers(s) {
method match_books (line 3267) | match_books(s) {
method get_book_indices (line 3298) | get_book_indices(books, s) {
method match_passages (line 3308) | match_passages(s) {
method clean_end_match (line 3340) | clean_end_match(s, match, part) {
method adjust_regexp_end (line 3361) | adjust_regexp_end(accum, old_length, new_length) {
method create_book_range (line 3370) | create_book_range(s, passage, book_id) {
method add_book_range_object (line 3388) | add_book_range_object(passage, prev, start_book_number) {
method add_offset_to_indices (line 3419) | add_offset_to_indices(indices, value_to_add) {
method constructor (line 3427) | constructor(parent) {
method testaments (line 3479) | get testaments() {
method testaments (line 3482) | set testaments(filter) {
method set_apocrypha (line 3511) | set_apocrypha(include_apocrypha) {
method versification_system (line 3523) | get versification_system() {
method versification_system (line 3527) | set versification_system(system) {
method case_sensitive (line 3555) | get case_sensitive() {
method case_sensitive (line 3559) | set case_sensitive(arg) {
method grammar (line 3568) | get grammar() {
method grammar (line 3572) | set grammar(arg) {
method punctuation_strategy (line 3577) | get punctuation_strategy() {
method punctuation_strategy (line 3580) | set punctuation_strategy(arg) {
method constructor (line 3596) | constructor(options, translations) {
method handle_array (line 3604) | handle_array(passages, accum = [], context = {}) {
method handle_obj (line 3613) | handle_obj(passage, accum, context) {
method b (line 3622) | b(passage, accum, context) {
method b_pre (line 3648) | b_pre(passage, accum, context) {
method b_range (line 3652) | b_range(passage, accum, context) {
method b_range_pre (line 3656) | b_range_pre(passage, accum, context) {
method b_range_start (line 3678) | b_range_start(passage, accum, context) {
method base (line 3682) | base(passage, accum, context) {
method bc (line 3687) | bc(passage, accum, context) {
method bc_title (line 3739) | bc_title(passage, accum, context) {
method bcv (line 3772) | bcv(passage, accum, context) {
method bv (line 3806) | bv(passage, accum, context) {
method c (line 3826) | c(passage, accum, context) {
method c_psalm (line 3857) | c_psalm(passage, accum, context) {
method c_title (line 3875) | c_title(passage, accum, context) {
method cb_range (line 3892) | cb_range(passage, accum, context) {
method context (line 3900) | context(passage, accum, context) {
method cv (line 3908) | cv(passage, accum, context) {
method cv_psalm (line 3935) | cv_psalm(passage, accum, context) {
method ff (line 3944) | ff(passage, accum, context) {
method integer (line 3962) | integer(passage, accum, context) {
method integer_title (line 3969) | integer_title(passage, accum, context) {
method next_v (line 3990) | next_v(passage, accum, context) {
method sequence (line 4038) | sequence(passage, accum, context) {
method sequence_post_enclosed (line 4071) | sequence_post_enclosed(passage, accum, context) {
method v (line 4075) | v(passage, accum, context) {
method range (line 4102) | range(passage, accum, context) {
method range_change_end (line 4174) | range_change_end(passage, accum, new_end) {
method range_change_integer_end (line 4191) | range_change_integer_end(passage, accum) {
method range_check_new_end (line 4213) | range_check_new_end(translations, start_obj, end_obj, valid) {
method range_end_b (line 4238) | range_end_b(passage, accum, context) {
method range_get_new_end_value (line 4242) | range_get_new_end_value(start_obj, end_obj, valid, key) {
method range_handle_invalid (line 4255) | range_handle_invalid(valid, passage, start, start_obj, end, end_obj, acc...
method range_handle_valid (line 4279) | range_handle_valid(valid, passage, start, start_obj, end, end_obj, accum) {
method range_validate (line 4295) | range_validate(valid, start_obj, end_obj, passage) {
method stop (line 4320) | stop(passage, accum, context) {
method translation_sequence (line 4327) | translation_sequence(passage, accum, context) {
method translation_sequence_apply (line 4362) | translation_sequence_apply(accum, translations) {
method word (line 4387) | word(passage, accum, context) {
method pluck (line 4392) | pluck(type, passages) {
method pluck_integer (line 4400) | pluck_integer(type, passages) {
method pluck_last_recursively (line 4404) | pluck_last_recursively(type, passages) {
method set_context_from_object (line 4421) | set_context_from_object(context, keys, obj) {
method reset_context (line 4430) | reset_context(context, keys) {
method get_partial_verse (line 4435) | get_partial_verse(object_with_partial) {
method fix_start_zeroes (line 4445) | fix_start_zeroes(valid, c, v = void 0) {
method calculate_indices (line 4455) | calculate_indices(match, adjust) {
method get_absolute_indices (line 4504) | get_absolute_indices([start, end]) {
method normalize_passage_and_alternates (line 4519) | normalize_passage_and_alternates(passage, alternates, adjust_end_index_b...
method validate_ref (line 4538) | validate_ref(translations, start, end = null) {
method validate_end_ref (line 4570) | validate_end_ref(system, start, end, valid, messages) {
method validate_known_end_book (line 4597) | validate_known_end_book(system, order_system, start, end, valid, message...
method validate_known_start_book (line 4637) | validate_known_start_book(system, start, messages) {
method validate_start_ref (line 4683) | validate_start_ref(system, start, messages) {
method constructor (line 4703) | constructor(parent) {
method filter_books (line 4708) | filter_books(testaments, case_sensitive) {
method has_testament_overlap (line 4743) | has_testament_overlap(testaments, book_testament) {
method get_testament_overlap (line 4751) | get_testament_overlap(testaments, book) {
method add_books (line 4758) | add_books(books) {
method get_book_pattern_regexps (line 4805) | get_book_pattern_regexps(pattern, book_data) {
method get_book_testaments (line 4829) | get_book_testaments(pattern) {
method constructor (line 4879) | constructor(parent) {
method translation_info (line 4882) | translation_info(system = "default") {
method add_translations (line 4912) | add_translations(new_translations) {
method apply_case_sensitive (line 4950) | apply_case_sensitive(case_sensitive) {
method normalize_sent_translation_data (line 4957) | normalize_sent_translation_data(translation) {
method add_system (line 4973) | add_system(system, new_system) {
method make_system_books (line 4999) | make_system_books(books) {
method validate_system_chapters (line 5018) | validate_system_chapters(chapters) {
method add_new_translations_regexp (line 5034) | add_new_translations_regexp(texts_for_regexp, new_translations) {
method constructor (line 5053) | constructor() {
method constructor (line 5511) | constr
Copy disabled (too large)
Download .json
Condensed preview — 459 files, each showing path, character count, and a content snippet. Download the .json file for the full structured content (64,557K chars).
[
{
"path": ".gitignore",
"chars": 13,
"preview": "node_modules\n"
},
{
"path": "LICENSE.md",
"chars": 1061,
"preview": "Copyright (c) 2011-2026 Stephen Smith\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of t"
},
{
"path": "Readme.md",
"chars": 73129,
"preview": "# Bible Passage Reference Parser\n\nThis project is a Typescript implementation of a Bible-passage reference parser (seein"
},
{
"path": "bin/01.add_lang.pl",
"chars": 41352,
"preview": "use strict;\r\nuse warnings;\r\nuse Data::Dumper;\r\nuse Unicode::Normalize;\r\nuse JSON;\r\nuse MIME::Base64;\r\n\r\nmy ($lang) = @AR"
},
{
"path": "bin/add_cross_lang.pl",
"chars": 11268,
"preview": "use strict;\nuse warnings;\nuse Unicode::Normalize;\nuse utf8;\nuse Data::Dumper;\n\nmy $src_dir = '../src';\nmy %ranges = (\n\tf"
},
{
"path": "bin/build_lang.sh",
"chars": 3059,
"preview": "#!/usr/bin/bash\nif [ -z \"$1\" ]; then\n echo \"Please specify an ISO language code to continue, such as: sh build_lang.sh "
},
{
"path": "bin/fuzz_lang.js",
"chars": 5599,
"preview": "\"use strict\";\n\nconst lang = \"en\";\n\nimport * as fs from \"fs\";\nimport { bcv_parser } from \"../esm/bcv_parser.js\";\nconst la"
},
{
"path": "bin/letters/blocks.txt",
"chars": 4795,
"preview": "# http://xregexp.com/addons/unicode/unicode-blocks.js\nBasic_Latin\t\\u0000-\\u007F\nLatin_1_Supplement\t\\u0080-\\u00FF\nLatin_E"
},
{
"path": "bin/letters/letters.txt",
"chars": 7078,
"preview": "# http://xregexp.com/addons/unicode/unicode-base.js plus combining characters\n#\\u0030-\\u0039 #numbers\n\\u0041-\\u005A\n\\u00"
},
{
"path": "bin/make_regexps.js",
"chars": 1089,
"preview": "import { createRequire } from 'module';\nconst require = createRequire(import.meta.url);\n\nconst { RegExpBuilder } = requi"
},
{
"path": "cjs/ar_bcv_parser.js",
"chars": 334406,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/ascii_bcv_parser.js",
"chars": 499907,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/bg_bcv_parser.js",
"chars": 341112,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/ceb_bcv_parser.js",
"chars": 333027,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/cs_bcv_parser.js",
"chars": 338250,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/da_bcv_parser.js",
"chars": 335779,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/de_bcv_parser.js",
"chars": 335474,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/el_bcv_parser.js",
"chars": 334934,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/en_bcv_parser.js",
"chars": 359103,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/es_bcv_parser.js",
"chars": 339571,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/fa_bcv_parser.js",
"chars": 376614,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/fi_bcv_parser.js",
"chars": 337107,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/fr_bcv_parser.js",
"chars": 336983,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/full_bcv_parser.js",
"chars": 659523,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/he_bcv_parser.js",
"chars": 330957,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/hi_bcv_parser.js",
"chars": 333598,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/hr_bcv_parser.js",
"chars": 334827,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/ht_bcv_parser.js",
"chars": 333075,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/hu_bcv_parser.js",
"chars": 335577,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/id_bcv_parser.js",
"chars": 333330,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/is_bcv_parser.js",
"chars": 335063,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/it_bcv_parser.js",
"chars": 335074,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/ja_bcv_parser.js",
"chars": 332873,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/jv_bcv_parser.js",
"chars": 332679,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/ko_bcv_parser.js",
"chars": 332186,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/la_bcv_parser.js",
"chars": 335987,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/mk_bcv_parser.js",
"chars": 336206,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/mr_bcv_parser.js",
"chars": 334222,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/ne_bcv_parser.js",
"chars": 336215,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/nl_bcv_parser.js",
"chars": 335479,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/no_bcv_parser.js",
"chars": 335681,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/or_bcv_parser.js",
"chars": 333287,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/pa_bcv_parser.js",
"chars": 333418,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/package.json",
"chars": 23,
"preview": "{\n\t\"type\": \"commonjs\"\n}"
},
{
"path": "cjs/pl_bcv_parser.js",
"chars": 346286,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/pt_bcv_parser.js",
"chars": 334969,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/ro_bcv_parser.js",
"chars": 334631,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/ru_bcv_parser.js",
"chars": 338294,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/sk_bcv_parser.js",
"chars": 340267,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/so_bcv_parser.js",
"chars": 333025,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/sq_bcv_parser.js",
"chars": 332430,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/sr_bcv_parser.js",
"chars": 337167,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/sv_bcv_parser.js",
"chars": 334570,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/sw_bcv_parser.js",
"chars": 335948,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/ta_bcv_parser.js",
"chars": 337725,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/th_bcv_parser.js",
"chars": 331813,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/tl_bcv_parser.js",
"chars": 339520,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/tr_bcv_parser.js",
"chars": 336079,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/uk_bcv_parser.js",
"chars": 347616,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/ur_bcv_parser.js",
"chars": 336668,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/vi_bcv_parser.js",
"chars": 334410,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "cjs/zh_bcv_parser.js",
"chars": 333302,
"preview": "if (typeof module === \"undefined\") { var module = {}; }\n\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOw"
},
{
"path": "esm/bcv_parser.d.ts",
"chars": 657,
"preview": "export declare class bcv_parser {\n\tconstructor(lang: BCVParserConstructor);\n\tparse(string_to_parse: string);\n\tparse_with"
},
{
"path": "esm/bcv_parser.js",
"chars": 192377,
"preview": "// build/bcv_grammar.js\nvar peg$SyntaxError = class extends SyntaxError {\n constructor(message, expected, found, locati"
},
{
"path": "esm/lang/ar.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/ar.js",
"chars": 44468,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"ar\"];\n "
},
{
"path": "esm/lang/ascii.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/ascii.js",
"chars": 209969,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"ar\", \"bg"
},
{
"path": "esm/lang/bg.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/bg.js",
"chars": 51174,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"bg\"];\n "
},
{
"path": "esm/lang/ceb.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/ceb.js",
"chars": 43089,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"ceb\"];\n "
},
{
"path": "esm/lang/cs.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/cs.js",
"chars": 48312,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"cs\"];\n "
},
{
"path": "esm/lang/da.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/da.js",
"chars": 45841,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"da\"];\n "
},
{
"path": "esm/lang/de.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/de.js",
"chars": 45536,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"de\"];\n "
},
{
"path": "esm/lang/el.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/el.js",
"chars": 44996,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"el\"];\n "
},
{
"path": "esm/lang/en.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/en.js",
"chars": 69165,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"en\"];\n "
},
{
"path": "esm/lang/es.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/es.js",
"chars": 49633,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"es\"];\n "
},
{
"path": "esm/lang/fa.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/fa.js",
"chars": 86676,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"fa\"];\n "
},
{
"path": "esm/lang/fi.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/fi.js",
"chars": 47169,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"fi\"];\n "
},
{
"path": "esm/lang/fr.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/fr.js",
"chars": 47045,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"fr\"];\n "
},
{
"path": "esm/lang/full.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/full.js",
"chars": 369585,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"ar\", \"bg"
},
{
"path": "esm/lang/he.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/he.js",
"chars": 41019,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"he\"];\n "
},
{
"path": "esm/lang/hi.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/hi.js",
"chars": 43660,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"hi\"];\n "
},
{
"path": "esm/lang/hr.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/hr.js",
"chars": 44889,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"hr\"];\n "
},
{
"path": "esm/lang/ht.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/ht.js",
"chars": 43137,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"ht\"];\n "
},
{
"path": "esm/lang/hu.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/hu.js",
"chars": 45639,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"hu\"];\n "
},
{
"path": "esm/lang/id.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/id.js",
"chars": 43392,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"id\"];\n "
},
{
"path": "esm/lang/is.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/is.js",
"chars": 45125,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"is\"];\n "
},
{
"path": "esm/lang/it.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/it.js",
"chars": 45136,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"it\"];\n "
},
{
"path": "esm/lang/ja.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/ja.js",
"chars": 42935,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"ja\"];\n "
},
{
"path": "esm/lang/jv.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/jv.js",
"chars": 42741,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"jv\"];\n "
},
{
"path": "esm/lang/ko.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/ko.js",
"chars": 42248,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"ko\"];\n "
},
{
"path": "esm/lang/la.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/la.js",
"chars": 46049,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"la\"];\n "
},
{
"path": "esm/lang/mk.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/mk.js",
"chars": 46268,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"mk\"];\n "
},
{
"path": "esm/lang/mr.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/mr.js",
"chars": 44284,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"mr\"];\n "
},
{
"path": "esm/lang/ne.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/ne.js",
"chars": 46277,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"ne\"];\n "
},
{
"path": "esm/lang/nl.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/nl.js",
"chars": 45541,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"nl\"];\n "
},
{
"path": "esm/lang/no.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/no.js",
"chars": 45743,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"no\"];\n "
},
{
"path": "esm/lang/or.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/or.js",
"chars": 43349,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"or\"];\n "
},
{
"path": "esm/lang/pa.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/pa.js",
"chars": 43480,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"pa\"];\n "
},
{
"path": "esm/lang/pl.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/pl.js",
"chars": 56348,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"pl\"];\n "
},
{
"path": "esm/lang/pt.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/pt.js",
"chars": 45031,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"pt\"];\n "
},
{
"path": "esm/lang/ro.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/ro.js",
"chars": 44693,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"ro\"];\n "
},
{
"path": "esm/lang/ru.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/ru.js",
"chars": 48356,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"ru\"];\n "
},
{
"path": "esm/lang/sk.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/sk.js",
"chars": 50329,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"sk\"];\n "
},
{
"path": "esm/lang/so.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/so.js",
"chars": 43087,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"so\"];\n "
},
{
"path": "esm/lang/sq.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/sq.js",
"chars": 42492,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"sq\"];\n "
},
{
"path": "esm/lang/sr.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/sr.js",
"chars": 47229,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"sr\"];\n "
},
{
"path": "esm/lang/sv.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/sv.js",
"chars": 44632,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"sv\"];\n "
},
{
"path": "esm/lang/sw.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/sw.js",
"chars": 46010,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"sw\"];\n "
},
{
"path": "esm/lang/ta.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/ta.js",
"chars": 47787,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"ta\"];\n "
},
{
"path": "esm/lang/th.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/th.js",
"chars": 41875,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"th\"];\n "
},
{
"path": "esm/lang/tl.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/tl.js",
"chars": 49582,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"tl\"];\n "
},
{
"path": "esm/lang/tr.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/tr.js",
"chars": 46141,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"tr\"];\n "
},
{
"path": "esm/lang/uk.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/uk.js",
"chars": 57678,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"uk\"];\n "
},
{
"path": "esm/lang/ur.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/ur.js",
"chars": 46730,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"ur\"];\n "
},
{
"path": "esm/lang/vi.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/vi.js",
"chars": 44472,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"vi\"];\n "
},
{
"path": "esm/lang/zh.d.ts",
"chars": 98,
"preview": "export const grammar: unknown;\nexport const regexps: unknown;\nexport const translations: unknown;\n"
},
{
"path": "esm/lang/zh.js",
"chars": 43364,
"preview": "// build/bcv_regexps.ts\nvar bcv_regexps = class {\n constructor() {\n this.books = [];\n this.languages = [\"zh\"];\n "
},
{
"path": "js/ar_bcv_parser.js",
"chars": 252946,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/ascii_bcv_parser.js",
"chars": 477604,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/bg_bcv_parser.js",
"chars": 264001,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/ceb_bcv_parser.js",
"chars": 249853,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/cs_bcv_parser.js",
"chars": 260619,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/da_bcv_parser.js",
"chars": 255818,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/de_bcv_parser.js",
"chars": 256747,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/el_bcv_parser.js",
"chars": 269637,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/en_bcv_parser.js",
"chars": 306193,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/es_bcv_parser.js",
"chars": 279433,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/fi_bcv_parser.js",
"chars": 259925,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/fr_bcv_parser.js",
"chars": 266810,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/full_bcv_parser.js",
"chars": 654420,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/he_bcv_parser.js",
"chars": 258402,
"preview": "(function() {\r\n var bcv_parser, bcv_passage, bcv_utils, root,\r\n hasProp = {}.hasOwnProperty;\r\n\r\n root = this;\r\n\r\n "
},
{
"path": "js/hi_bcv_parser.js",
"chars": 249465,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/hr_bcv_parser.js",
"chars": 252718,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/ht_bcv_parser.js",
"chars": 251843,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/hu_bcv_parser.js",
"chars": 263218,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/is_bcv_parser.js",
"chars": 255828,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/it_bcv_parser.js",
"chars": 266268,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/ja_bcv_parser.js",
"chars": 258080,
"preview": "(function() {\r\n var bcv_parser, bcv_passage, bcv_utils, root,\r\n hasProp = {}.hasOwnProperty;\r\n\r\n root = this;\r\n\r\n "
},
{
"path": "js/jv_bcv_parser.js",
"chars": 249649,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/ko_bcv_parser.js",
"chars": 257869,
"preview": "(function() {\r\n var bcv_parser, bcv_passage, bcv_utils, root,\r\n hasProp = {}.hasOwnProperty;\r\n\r\n root = this;\r\n\r\n "
},
{
"path": "js/la_bcv_parser.js",
"chars": 262056,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/mk_bcv_parser.js",
"chars": 255160,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/mr_bcv_parser.js",
"chars": 251902,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/ne_bcv_parser.js",
"chars": 254291,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/nl_bcv_parser.js",
"chars": 262129,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/no_bcv_parser.js",
"chars": 258411,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/or_bcv_parser.js",
"chars": 250514,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/pa_bcv_parser.js",
"chars": 251961,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/package.json",
"chars": 23,
"preview": "{\n\t\"type\": \"commonjs\"\n}"
},
{
"path": "js/pl_bcv_parser.js",
"chars": 279638,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
},
{
"path": "js/pt_bcv_parser.js",
"chars": 269766,
"preview": "(function() {\n var bcv_parser, bcv_passage, bcv_utils, root,\n hasProp = {}.hasOwnProperty;\n\n root = this;\n\n bcv_pa"
}
]
// ... and 259 more files (download for full content)
About this extraction
This page contains the full source code of the openbibleinfo/Bible-Passage-Reference-Parser GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 459 files (55.2 MB), approximately 14.5M tokens, and a symbol index with 7476 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.