Repository: WeDontPanic/Jotoba Branch: dev Commit: 52073c380906 Files: 435 Total size: 1.2 MB Directory structure: gitextract_jueqquxr/ ├── .dockerignore ├── .github/ │ ├── FUNDING.yml │ ├── ISSUE_TEMPLATE/ │ │ ├── bug_report.md │ │ └── feature_request.md │ └── workflows/ │ └── docker-image.yml ├── .gitignore ├── Cargo.toml ├── Dockerfile ├── LICENSE ├── README.md ├── deny.toml ├── docker-compose.yaml ├── html/ │ └── assets/ │ ├── css/ │ │ ├── main.css │ │ ├── mobile.css │ │ ├── overlay/ │ │ │ ├── croppingOverlay.css │ │ │ ├── footerOverlay.css │ │ │ ├── imgUploadOverlay.css │ │ │ ├── notificationOverlay.css │ │ │ ├── overlayBase.css │ │ │ ├── radicalOverlay.css │ │ │ ├── settingsOverlay.css │ │ │ └── suggestionOverlay.css │ │ ├── page/ │ │ │ ├── aboutPage.css │ │ │ ├── errorPage.css │ │ │ ├── footer.css │ │ │ ├── helpPage.css │ │ │ ├── indexPage.css │ │ │ ├── infoPage.css │ │ │ ├── kanjiPage.css │ │ │ ├── multiPage/ │ │ │ │ ├── kana.css │ │ │ │ ├── kanji.css │ │ │ │ └── markdown.css │ │ │ ├── namePage.css │ │ │ ├── newsPage.css │ │ │ ├── sentencePage.css │ │ │ ├── wordExtensions/ │ │ │ │ ├── searchAnnotation.css │ │ │ │ └── sentenceReader.css │ │ │ └── wordPage.css │ │ ├── search/ │ │ │ ├── choices.css │ │ │ └── searchRow.css │ │ └── tools/ │ │ ├── alerts.css │ │ ├── pagination.css │ │ └── ripple.css │ ├── docs.html │ ├── fonts/ │ │ └── fonts.css │ ├── js/ │ │ ├── lib/ │ │ │ ├── d3.js │ │ │ ├── jc.js │ │ │ └── jotobaChoices.js │ │ ├── locales/ │ │ │ └── collection.js │ │ ├── mobile.js │ │ ├── page/ │ │ │ ├── infoPage.js │ │ │ ├── kanjiPage.js │ │ │ ├── newsPage.js │ │ │ ├── overlay/ │ │ │ │ ├── notifications.js │ │ │ │ ├── settings.js │ │ │ │ └── settings_overlay.js │ │ │ ├── sentencePage.js │ │ │ └── wordPage.js │ │ ├── qol.js │ │ ├── search/ │ │ │ ├── api.js │ │ │ ├── eventHandler.js │ │ │ ├── overlay/ │ │ │ │ ├── imageSearch.js │ │ │ │ ├── radicalSearch.js │ │ │ │ ├── speechSearch.js │ │ │ │ └── suggestionOverlay.js │ │ │ ├── search.js │ │ │ ├── shared.js │ │ │ 
└── suggestions.js │ │ └── tools/ │ │ ├── jotoTools.js │ │ ├── ripple.js │ │ ├── service-worker.js │ │ ├── theme.js │ │ ├── utils.js │ │ └── utils2.js │ └── settings/ │ ├── manifest.json │ └── opensearch.xml ├── jotoba_bin/ │ ├── Cargo.toml │ ├── benches/ │ │ ├── my_benchmark.rs │ │ └── resources.rs │ └── src/ │ ├── check.rs │ ├── cli.rs │ ├── main.rs │ └── webserver.rs ├── lib/ │ ├── api/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── app/ │ │ │ ├── completions/ │ │ │ │ ├── kanji/ │ │ │ │ │ ├── meaning.rs │ │ │ │ │ ├── mod.rs │ │ │ │ │ └── reading.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── names/ │ │ │ │ │ └── mod.rs │ │ │ │ ├── opensearch/ │ │ │ │ │ ├── mod.rs │ │ │ │ │ └── parse.rs │ │ │ │ ├── request.rs │ │ │ │ └── words/ │ │ │ │ ├── foreign.rs │ │ │ │ ├── hashtag.rs │ │ │ │ ├── kana_end_ext.rs │ │ │ │ ├── mod.rs │ │ │ │ └── native.rs │ │ │ ├── details/ │ │ │ │ ├── mod.rs │ │ │ │ ├── sentences.rs │ │ │ │ └── word.rs │ │ │ ├── img/ │ │ │ │ ├── mod.rs │ │ │ │ └── request.rs │ │ │ ├── kanji/ │ │ │ │ ├── ids_tree/ │ │ │ │ │ ├── builder.rs │ │ │ │ │ └── mod.rs │ │ │ │ └── mod.rs │ │ │ ├── mod.rs │ │ │ ├── news/ │ │ │ │ ├── detailed.rs │ │ │ │ ├── mod.rs │ │ │ │ └── short.rs │ │ │ ├── radical/ │ │ │ │ ├── kanji.rs │ │ │ │ ├── mod.rs │ │ │ │ └── search/ │ │ │ │ ├── jp_search.rs │ │ │ │ ├── meaning.rs │ │ │ │ └── mod.rs │ │ │ └── search/ │ │ │ ├── kanji.rs │ │ │ ├── mod.rs │ │ │ ├── names.rs │ │ │ ├── sentences.rs │ │ │ └── words.rs │ │ ├── internal/ │ │ │ ├── info/ │ │ │ │ ├── mod.rs │ │ │ │ └── words.rs │ │ │ └── mod.rs │ │ ├── lib.rs │ │ └── search/ │ │ ├── kanji/ │ │ │ └── mod.rs │ │ ├── mod.rs │ │ ├── name/ │ │ │ └── mod.rs │ │ ├── sentence/ │ │ │ └── mod.rs │ │ └── word/ │ │ └── mod.rs │ ├── config/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── lib.rs │ ├── engine/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── lib.rs │ │ ├── pushable/ │ │ │ ├── counter.rs │ │ │ ├── f_max_cnt.rs │ │ │ ├── max_cnt.rs │ │ │ ├── mod.rs │ │ │ ├── push_dbg.rs │ │ │ ├── push_fn.rs │ │ │ └── push_mod.rs │ │ ├── 
relevance/ │ │ │ ├── data.rs │ │ │ ├── item.rs │ │ │ └── mod.rs │ │ ├── result.rs │ │ ├── task.rs │ │ └── utils.rs │ ├── error/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── api_error.rs │ │ └── lib.rs │ ├── frontend/ │ │ ├── Cargo.toml │ │ ├── src/ │ │ │ ├── about.rs │ │ │ ├── actix_ructe.rs │ │ │ ├── build.rs │ │ │ ├── direct.rs │ │ │ ├── help_page.rs │ │ │ ├── index.rs │ │ │ ├── lib.rs │ │ │ ├── liveness.rs │ │ │ ├── news_ep.rs │ │ │ ├── og_tags.rs │ │ │ ├── search_ep.rs │ │ │ ├── search_help.rs │ │ │ ├── session.rs │ │ │ ├── templ_utils.rs │ │ │ ├── unescaped.rs │ │ │ ├── url_query.rs │ │ │ ├── user_settings.rs │ │ │ └── web_error.rs │ │ └── templates/ │ │ ├── base.rs.html │ │ ├── base_index.rs.html │ │ ├── error_page.rs.html │ │ ├── functional/ │ │ │ └── render_sentence.rs.html │ │ ├── overlays/ │ │ │ ├── info/ │ │ │ │ ├── collocations.rs.html │ │ │ │ ├── definitions_jp.rs.html │ │ │ │ └── inflections.rs.html │ │ │ ├── mobile_overlays.rs.html │ │ │ ├── page/ │ │ │ │ ├── decomposition_graph.rs.html │ │ │ │ ├── image_crop.rs.html │ │ │ │ ├── loading.rs.html │ │ │ │ └── settings.rs.html │ │ │ ├── page_overlays.rs.html │ │ │ ├── search_overlays.rs.html │ │ │ └── searchbar/ │ │ │ ├── image_input.rs.html │ │ │ ├── radicals.rs.html │ │ │ ├── speech.rs.html │ │ │ └── suggestions.rs.html │ │ ├── pages/ │ │ │ ├── about.rs.html │ │ │ ├── info.rs.html │ │ │ ├── kanji.rs.html │ │ │ ├── names.rs.html │ │ │ ├── news.rs.html │ │ │ ├── search_help.rs.html │ │ │ ├── sentences.rs.html │ │ │ └── words.rs.html │ │ └── subtemplates/ │ │ ├── footer.rs.html │ │ ├── head.rs.html │ │ ├── input_dropdown.rs.html │ │ ├── main_body.rs.html │ │ └── paginator.rs.html │ ├── indexes/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── hashtag.rs │ │ ├── kanji/ │ │ │ ├── mod.rs │ │ │ ├── reading.rs │ │ │ └── reading_freq/ │ │ │ ├── k_freq_item.rs │ │ │ ├── mod.rs │ │ │ └── reading.rs │ │ ├── lib.rs │ │ ├── names.rs │ │ ├── ng_freq.rs │ │ ├── radical.rs │ │ ├── regex.rs │ │ ├── sentences.rs │ │ ├── storage/ │ │ │ 
├── kanji.rs │ │ │ ├── mod.rs │ │ │ ├── name.rs │ │ │ ├── radical.rs │ │ │ ├── sentence.rs │ │ │ ├── suggestions.rs │ │ │ ├── utils.rs │ │ │ └── word.rs │ │ ├── term_freq.rs │ │ └── words/ │ │ ├── foreign.rs │ │ ├── mod.rs │ │ └── native.rs │ ├── japanese/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── furigana/ │ │ │ ├── generate/ │ │ │ │ ├── mod.rs │ │ │ │ └── traits.rs │ │ │ ├── mod.rs │ │ │ └── tests.rs │ │ ├── guessing.rs │ │ ├── lib.rs │ │ └── radicals.rs │ ├── localization/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── error.rs │ │ ├── language.rs │ │ ├── lib.rs │ │ └── traits.rs │ ├── news/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── lib.rs │ ├── resources/ │ │ ├── Cargo.toml │ │ ├── build.rs │ │ └── src/ │ │ ├── lib.rs │ │ ├── retrieve/ │ │ │ ├── kanji.rs │ │ │ ├── mod.rs │ │ │ ├── name.rs │ │ │ ├── sentence.rs │ │ │ └── word.rs │ │ └── storage/ │ │ ├── feature.rs │ │ ├── kanji.rs │ │ ├── mod.rs │ │ ├── name.rs │ │ ├── sentence.rs │ │ └── word.rs │ ├── search/ │ │ ├── Cargo.toml │ │ ├── src/ │ │ │ ├── engine/ │ │ │ │ ├── mod.rs │ │ │ │ ├── names/ │ │ │ │ │ ├── foreign.rs │ │ │ │ │ ├── mod.rs │ │ │ │ │ └── native.rs │ │ │ │ ├── radical/ │ │ │ │ │ └── mod.rs │ │ │ │ ├── sentences/ │ │ │ │ │ ├── foreign.rs │ │ │ │ │ ├── mod.rs │ │ │ │ │ └── native.rs │ │ │ │ └── words/ │ │ │ │ ├── foreign.rs │ │ │ │ ├── mod.rs │ │ │ │ └── native/ │ │ │ │ ├── k_reading.rs │ │ │ │ ├── mod.rs │ │ │ │ └── regex.rs │ │ │ ├── executor/ │ │ │ │ ├── mod.rs │ │ │ │ ├── out_builder.rs │ │ │ │ ├── producer.rs │ │ │ │ ├── search_result.rs │ │ │ │ └── searchable.rs │ │ │ ├── kanji/ │ │ │ │ ├── mod.rs │ │ │ │ ├── order.rs │ │ │ │ ├── result.rs │ │ │ │ └── tag_only.rs │ │ │ ├── lib.rs │ │ │ ├── name/ │ │ │ │ ├── mod.rs │ │ │ │ ├── order/ │ │ │ │ │ ├── foreign.rs │ │ │ │ │ ├── japanese.rs │ │ │ │ │ └── mod.rs │ │ │ │ └── producer/ │ │ │ │ ├── foreign.rs │ │ │ │ ├── kanji_reading.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── native/ │ │ │ │ │ ├── mod.rs │ │ │ │ │ └── split.rs │ │ │ │ └── sequence.rs │ │ │ ├── query/ │ │ 
│ │ ├── form.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── parser/ │ │ │ │ │ ├── lang.rs │ │ │ │ │ ├── mod.rs │ │ │ │ │ ├── prefix.rs │ │ │ │ │ ├── req_terms.rs │ │ │ │ │ └── tags.rs │ │ │ │ ├── prefix.rs │ │ │ │ ├── regex.rs │ │ │ │ ├── tags.rs │ │ │ │ └── user_settings.rs │ │ │ ├── radical/ │ │ │ │ ├── mod.rs │ │ │ │ └── word/ │ │ │ │ ├── foreign.rs │ │ │ │ ├── mod.rs │ │ │ │ └── romaji.rs │ │ │ ├── sentence/ │ │ │ │ ├── mod.rs │ │ │ │ ├── order/ │ │ │ │ │ ├── foreign.rs │ │ │ │ │ ├── mod.rs │ │ │ │ │ └── native.rs │ │ │ │ ├── producer/ │ │ │ │ │ ├── filter.rs │ │ │ │ │ ├── foreign.rs │ │ │ │ │ ├── kanji.rs │ │ │ │ │ ├── mod.rs │ │ │ │ │ ├── native.rs │ │ │ │ │ ├── sequence.rs │ │ │ │ │ └── tag.rs │ │ │ │ └── result.rs │ │ │ └── word/ │ │ │ ├── filter.rs │ │ │ ├── kanji.rs │ │ │ ├── mod.rs │ │ │ ├── order/ │ │ │ │ ├── foreign.rs │ │ │ │ ├── kanji_reading.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── native.rs │ │ │ │ └── regex.rs │ │ │ ├── producer/ │ │ │ │ ├── foreign/ │ │ │ │ │ ├── mod.rs │ │ │ │ │ ├── romaji.rs │ │ │ │ │ └── task.rs │ │ │ │ ├── japanese/ │ │ │ │ │ ├── mod.rs │ │ │ │ │ ├── number.rs │ │ │ │ │ ├── sentence_reader.rs │ │ │ │ │ └── task.rs │ │ │ │ ├── k_reading.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── regex.rs │ │ │ │ ├── sequence.rs │ │ │ │ └── tag.rs │ │ │ └── result.rs │ │ └── tests/ │ │ └── search_test.rs │ ├── sentence_reader/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── analyzer.rs │ │ ├── grammar/ │ │ │ ├── mod.rs │ │ │ ├── rule.rs │ │ │ └── rule_set.rs │ │ ├── lib.rs │ │ ├── output.rs │ │ └── sentence/ │ │ ├── inflection.rs │ │ ├── mod.rs │ │ ├── owned_morpheme.rs │ │ └── part.rs │ ├── types/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── api/ │ │ │ ├── app/ │ │ │ │ ├── completions/ │ │ │ │ │ └── mod.rs │ │ │ │ ├── details/ │ │ │ │ │ ├── mod.rs │ │ │ │ │ ├── query.rs │ │ │ │ │ ├── sentence.rs │ │ │ │ │ └── word.rs │ │ │ │ ├── image/ │ │ │ │ │ └── mod.rs │ │ │ │ ├── kanji/ │ │ │ │ │ ├── ids_tree.rs │ │ │ │ │ └── mod.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── news/ │ │ │ │ │ ├── long.rs │ │ │ 
│ │ ├── mod.rs │ │ │ │ │ └── short.rs │ │ │ │ ├── radical/ │ │ │ │ │ ├── find_kanji.rs │ │ │ │ │ ├── mod.rs │ │ │ │ │ └── search.rs │ │ │ │ └── search/ │ │ │ │ ├── mod.rs │ │ │ │ ├── query.rs │ │ │ │ └── responses/ │ │ │ │ ├── k_compounds.rs │ │ │ │ ├── kanji.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── names.rs │ │ │ │ ├── sentences.rs │ │ │ │ └── words/ │ │ │ │ ├── inflection.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── sentence.rs │ │ │ │ └── word.rs │ │ │ ├── internal/ │ │ │ │ ├── info/ │ │ │ │ │ ├── mod.rs │ │ │ │ │ └── words.rs │ │ │ │ └── mod.rs │ │ │ ├── mod.rs │ │ │ └── search/ │ │ │ ├── kanji.rs │ │ │ ├── mod.rs │ │ │ ├── name.rs │ │ │ ├── sentence.rs │ │ │ └── word.rs │ │ ├── jotoba/ │ │ │ ├── indexes/ │ │ │ │ ├── hashtag.rs │ │ │ │ └── mod.rs │ │ │ ├── kanji/ │ │ │ │ ├── mod.rs │ │ │ │ ├── radical.rs │ │ │ │ └── reading.rs │ │ │ ├── language/ │ │ │ │ ├── mod.rs │ │ │ │ └── param.rs │ │ │ ├── mod.rs │ │ │ ├── names/ │ │ │ │ ├── mod.rs │ │ │ │ └── name_type.rs │ │ │ ├── pagination/ │ │ │ │ ├── mod.rs │ │ │ │ └── page.rs │ │ │ ├── search/ │ │ │ │ ├── guess.rs │ │ │ │ ├── help.rs │ │ │ │ ├── mod.rs │ │ │ │ └── query_type.rs │ │ │ ├── sentences/ │ │ │ │ ├── mod.rs │ │ │ │ ├── tag.rs │ │ │ │ └── translation.rs │ │ │ └── words/ │ │ │ ├── dialect.rs │ │ │ ├── dict.rs │ │ │ ├── field.rs │ │ │ ├── foreign_language.rs │ │ │ ├── gtype.rs │ │ │ ├── inflection.rs │ │ │ ├── information.rs │ │ │ ├── misc.rs │ │ │ ├── mod.rs │ │ │ ├── part_of_speech.rs │ │ │ ├── pitch/ │ │ │ │ ├── border.rs │ │ │ │ ├── mod.rs │ │ │ │ └── raw_data.rs │ │ │ ├── priority.rs │ │ │ ├── reading/ │ │ │ │ ├── iter.rs │ │ │ │ └── mod.rs │ │ │ └── sense.rs │ │ ├── lib.rs │ │ └── raw/ │ │ ├── jmdict/ │ │ │ └── mod.rs │ │ ├── jmnedict/ │ │ │ └── mod.rs │ │ ├── kanjidict/ │ │ │ └── mod.rs │ │ └── mod.rs │ └── utils/ │ ├── Cargo.toml │ └── src/ │ ├── binary_search.rs │ ├── korean.rs │ └── lib.rs ├── locales/ │ ├── de.mo │ ├── de.po │ ├── en.mo │ ├── en.po │ ├── hu.mo │ └── hu.po ├── rustfmt.toml └── scripts/ └── 
gen_locales.sh ================================================ FILE CONTENTS ================================================ ================================================ FILE: .dockerignore ================================================ /html/assets/js/lib/loadAnalytics.js /html/assets/svg /html/audio /html/*.html !/html/docs.html /target out *.old .env ipadic/ unidic/ unidic-mecab/ data/ data /resources/ /suggestions /suggestions_ audio_old *.bat ======= indexes/ indexes_old massiv.out */*/target tmp/ img_scan_tmp/ /news .gitignore /*.zip ================================================ FILE: .github/FUNDING.yml ================================================ # These are supported funding model platforms custom: ["https://paypal.me/JojiiOfficial", "https://paypal.me/yukaru1"] ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.md ================================================ --- name: Bug report about: Create a report to help us improve. Check the trello board first! title: '' labels: '' assignees: '' --- Before you file a bug report, check the [trello board](https://trello.com/b/nmG0xgaW/jotoba-roadmap) to see if this has already been added to the road map. **Describe the bug** A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior: 1. Go to '...' 2. Click on '....' 3. Scroll down to '....' 4. See error **Expected behavior** A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. **Desktop (please complete the following information):** - OS: [e.g. iOS] - Browser [e.g. chrome, safari] **Smartphone (please complete the following information):** - Device: [e.g. iPhone6] - OS: [e.g. iOS8.1] - Browser [e.g. stock browser, safari] **Additional context** Add any other context about the problem here. 
================================================ FILE: .github/ISSUE_TEMPLATE/feature_request.md ================================================ --- name: Feature request about: Suggest an idea for this project title: '' labels: '' assignees: '' --- **Is your feature request related to a problem? Please describe.** A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] **Describe the solution you'd like** A clear and concise description of what you want to happen. **Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. **Additional context** Add any other context or screenshots about the feature request here. ================================================ FILE: .github/workflows/docker-image.yml ================================================ name: Docker Image CI on: push: branches: [ master ] workflow_dispatch: jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Build the Docker image run: docker build . 
--file Dockerfile --tag ghcr.io/wedontpanic/jotoba:latest - name: Export image run: | echo ${{ secrets.GH_TOKEN }} | docker login ghcr.io -u ${{ secrets.GH_USER }} --password-stdin docker push ghcr.io/wedontpanic/jotoba:latest # - name: Update server # uses: garygrossgarten/github-action-ssh@release # with: # command: /home/jotoba/update.sh # host: ${{ secrets.HOST }} # port: ${{ secrets.PORT }} # username: ${{ secrets.SSH_USER }} # privateKey: ${{ secrets.SSH_KEY}} ================================================ FILE: .gitignore ================================================ /html/assets/js/lib/loadAnalytics.js /html/assets/svg /html/audio /html/assets/sitemap.xml /html/assets/*.html !/html/assets/docs.html /target out *.old .env ipadic/ unidic/ unidic-mecab/ data/ data /resources/ /suggestions /suggestions_ audio_old *.bat ======= /indexes/ indexes_old massiv.out */*/target tmp/ img_scan_tmp/ /news cluster_find .idea /resources_src/ /*.zip ================================================ FILE: Cargo.toml ================================================ [workspace] members = ["jotoba_bin", "lib/*"] [profile.dev] opt-level = 2 incremental = true lto = false strip = false [profile.release] lto = "fat" strip = true ================================================ FILE: Dockerfile ================================================ FROM rust:1.70.0-bullseye as build WORKDIR app COPY ./lib ./lib COPY ./.git ./.git COPY ./locales ./locales COPY ./Cargo.lock ./ COPY ./Cargo.toml ./ COPY ./tests ./tests COPY ./scripts ./scripts COPY ./jotoba_bin ./jotoba_bin COPY ./LICENSE ./ RUN apt clean RUN apt-get update --allow-releaseinfo-change -y RUN apt upgrade -y RUN apt install build-essential cmake pkg-config libssl-dev libleptonica-dev libtesseract-dev clang tesseract-ocr-jpn -y # Build your program for release RUN cargo build --release RUN mv target/release/jotoba . 
FROM debian:bullseye WORKDIR app RUN apt-get update --allow-releaseinfo-change -y RUN apt upgrade -y RUN apt install build-essential pkg-config cmake libssl-dev libleptonica-dev libtesseract-dev clang tesseract-ocr-jpn -y COPY --from=build /app/jotoba . COPY --from=build /app/locales ./locales RUN useradd -s /bin/bash runuser USER runuser # Run the binary CMD ["./jotoba","-s"] ================================================ FILE: LICENSE ================================================ GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. 
A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. 
The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. 
The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. 
The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. 
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. 
d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. 
A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License. Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. 
Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <https://www.gnu.org/licenses/>. ================================================ FILE: README.md ================================================ # Jotoba Jotoba is a free online multi-language Japanese dictionary based on a variety of free resources.
Public instance: [jotoba.de](https://jotoba.de)
Discord
### Get the new [Android App](https://play.google.com/store/apps/details?id=com.jotoba.mobile) now! # Team
JojiiOfficial Yukaru
Backend dev Frontend dev
# Dictionary licenses Almost all of the data used by [jotoba.de](https://jotoba.de) comes from external sources like [edrdg](http://www.edrdg.org/) for Words, Kanji, Names and Radicals or [WaniKani](https://www.wanikani.com/) and [Kanjialive](https://kanjialive.com/) for audio sources. For a detailed list of used resources and their licenses please visit [jotoba.de/about](https://jotoba.de/about). # Roadmap Please refer to our [Trello board](https://trello.com/b/nmG0xgaW/jotoba-roadmap) for a roadmap and the development progress. # Developing Jotoba is open source. Contributions are highly welcome and can be made by anyone who wants to help Jotoba grow.
That being said, all API endpoints exposed by Jotoba are documented and allowed to be used (within fair usage limits).
Refer to [API-Docs](https://jotoba.de/docs.html) for the API documentation and to [CONTRIBUTION](https://github.com/WeDontPanic/Jotoba/wiki/Contributing) for an introduction on how to contribute code to Jotoba. # Translations Jotoba aims to be a multi-language dictionary, and thus the website is intended to be fully translated into all available languages.
However, the main developers of this project don't speak ~10 languages. If you're interested in contributing to this project, we are thankful for every translation contribution.
For a guide on how to add translations please refer to the [wiki](https://github.com/WeDontPanic/Jotoba/wiki/Translate-%5BPage%5D). # License Jotoba itself is licensed under AGPL 3.0 or later. Please refer to the [license file](https://github.com/WeDontPanic/Jotoba/blob/master/LICENSE) for further information. Joto-kun (including all of his variants) is licensed under [CC BY-NC-ND 4.0](https://creativecommons.org/licenses/by-nc-nd/4.0/). ================================================ FILE: deny.toml ================================================ # This template contains all of the possible sections and their default values # Note that all fields that take a lint level have these possible values: # * deny - An error will be produced and the check will fail # * warn - A warning will be produced, but the check will not fail # * allow - No warning or error will be produced, though in some cases a note # will be # The values provided in this template are the default values that will be used # when any section or field is not specified in your own configuration # If 1 or more target triples (and optionally, target_features) are specified, # only the specified targets will be checked when running `cargo deny check`. # This means, if a particular package is only ever used as a target specific # dependency, such as, for example, the `nix` crate only being used via the # `target_family = "unix"` configuration, that only having windows targets in # this list would mean the nix crate, as well as any of its exclusive # dependencies not shared by any other crates, would be ignored, as the target # list here is effectively saying which targets you are building for. targets = [ # The triple can be any string, but only the target triples built in to # rustc (as of 1.40) can be checked against actual config expressions #{ triple = "x86_64-unknown-linux-musl" }, # You can also specify which target_features you promise are enabled for a # particular target. 
target_features are currently not validated against # the actual valid features supported by the target architecture. #{ triple = "wasm32-unknown-unknown", features = ["atomics"] }, ] # This section is considered when running `cargo deny check advisories` # More documentation for the advisories section can be found here: # https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html [advisories] # The path where the advisory database is cloned/fetched into db-path = "~/.cargo/advisory-db" # The url(s) of the advisory databases to use db-urls = ["https://github.com/rustsec/advisory-db"] # The lint level for security vulnerabilities vulnerability = "deny" # The lint level for unmaintained crates unmaintained = "warn" # The lint level for crates that have been yanked from their source registry yanked = "warn" # The lint level for crates with security notices. Note that as of # 2019-12-17 there are no security notice advisories in # https://github.com/rustsec/advisory-db notice = "warn" # A list of advisory IDs to ignore. Note that ignored advisories will still # output a note when they are encountered. ignore = [ #"RUSTSEC-0000-0000", ] # Threshold for security vulnerabilities, any vulnerability with a CVSS score # lower than the range specified will be ignored. Note that ignored advisories # will still output a note when they are encountered. # * None - CVSS Score 0.0 # * Low - CVSS Score 0.1 - 3.9 # * Medium - CVSS Score 4.0 - 6.9 # * High - CVSS Score 7.0 - 8.9 # * Critical - CVSS Score 9.0 - 10.0 #severity-threshold = # If this is true, then cargo deny will use the git executable to fetch advisory database. # If this is false, then it uses a built-in git library. # Setting this to true can be helpful if you have special authentication requirements that cargo-deny does not support. # See Git Authentication for more information about setting up git authentication. 
#git-fetch-with-cli = true # This section is considered when running `cargo deny check licenses` # More documentation for the licenses section can be found here: # https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html [licenses] # The lint level for crates which do not have a detectable license unlicensed = "warn" # List of explicitly allowed licenses # See https://spdx.org/licenses/ for list of possible licenses # [possible values: any SPDX 3.11 short identifier (+ optional exception)]. allow = [ "MIT", "AGPL-3.0", "GPL-3.0", "Apache-2.0", "BSD-3-Clause", "MPL-2.0", "BSD-2-Clause", "CC0-1.0", "Unicode-DFS-2016", "ISC", #"Apache-2.0 WITH LLVM-exception", ] # List of explicitly disallowed licenses # See https://spdx.org/licenses/ for list of possible licenses # [possible values: any SPDX 3.11 short identifier (+ optional exception)]. deny = [ #"Nokia", ] # Lint level for licenses considered copyleft copyleft = "warn" # Blanket approval or denial for OSI-approved or FSF Free/Libre licenses # * both - The license will be approved if it is both OSI-approved *AND* FSF # * either - The license will be approved if it is either OSI-approved *OR* FSF # * osi-only - The license will be approved if is OSI-approved *AND NOT* FSF # * fsf-only - The license will be approved if is FSF *AND NOT* OSI-approved # * neither - This predicate is ignored and the default lint level is used allow-osi-fsf-free = "neither" # Lint level used when no other predicates are matched # 1. License isn't in the allow or deny lists # 2. License isn't copyleft # 3. License isn't OSI/FSF, or allow-osi-fsf-free = "neither" default = "deny" # The confidence threshold for detecting a license from license text. # The higher the value, the more closely the license text must be to the # canonical license text of a valid SPDX license file. # [possible values: any between 0.0 and 1.0]. 
confidence-threshold = 0.8 # Allow 1 or more licenses on a per-crate basis, so that particular licenses # aren't accepted for every possible crate as with the normal allow list exceptions = [ # Each entry is the crate and version constraint, and its specific allow # list #{ allow = ["Zlib"], name = "adler32", version = "*" }, ] # Some crates don't have (easily) machine readable licensing information, # adding a clarification entry for it allows you to manually specify the # licensing information #[[licenses.clarify]] # The name of the crate the clarification applies to #name = "ring" # The optional version constraint for the crate #version = "*" # The SPDX expression for the license requirements of the crate #expression = "MIT AND ISC AND OpenSSL" # One or more files in the crate's source used as the "source of truth" for # the license expression. If the contents match, the clarification will be used # when running the license check, otherwise the clarification will be ignored # and the crate will be checked normally, which may produce warnings or errors # depending on the rest of your configuration #license-files = [ # Each entry is a crate relative path, and the (opaque) hash of its contents #{ path = "LICENSE", hash = 0xbd0eed23 } #] [licenses.private] # If true, ignores workspace crates that aren't published, or are only # published to private registries. # To see how to mark a crate as unpublished (to the official registry), # visit https://doc.rust-lang.org/cargo/reference/manifest.html#the-publish-field. ignore = false # One or more private registries that you might publish crates to, if a crate # is only published to private registries, and ignore is true, the crate will # not have its license(s) checked registries = [ #"https://sekretz.com/registry ] # This section is considered when running `cargo deny check bans`. 
# More documentation about the 'bans' section can be found here: # https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html [bans] # Lint level for when multiple versions of the same crate are detected multiple-versions = "warn" # Lint level for when a crate version requirement is `*` wildcards = "allow" # The graph highlighting used when creating dotgraphs for crates # with multiple versions # * lowest-version - The path to the lowest versioned duplicate is highlighted # * simplest-path - The path to the version with the fewest edges is highlighted # * all - Both lowest-version and simplest-path are used highlight = "all" # List of crates that are allowed. Use with care! allow = [ #{ name = "ansi_term", version = "=0.11.0" }, ] # List of crates to deny deny = [ # Each entry the name of a crate and a version range. If version is # not specified, all versions will be matched. #{ name = "ansi_term", version = "=0.11.0" }, # # Wrapper crates can optionally be specified to allow the crate when it # is a direct dependency of the otherwise banned crate #{ name = "ansi_term", version = "=0.11.0", wrappers = [] }, ] # Certain crates/versions that will be skipped when doing duplicate detection. skip = [ #{ name = "ansi_term", version = "=0.11.0" }, ] # Similarly to `skip` allows you to skip certain crates during duplicate # detection. Unlike skip, it also includes the entire tree of transitive # dependencies starting at the specified crate, up to a certain depth, which is # by default infinite skip-tree = [ #{ name = "ansi_term", version = "=0.11.0", depth = 20 }, ] # This section is considered when running `cargo deny check sources`. 
# More documentation about the 'sources' section can be found here: # https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html [sources] # Lint level for what to happen when a crate from a crate registry that is not # in the allow list is encountered unknown-registry = "warn" # Lint level for what to happen when a crate from a git repository that is not # in the allow list is encountered unknown-git = "warn" # List of URLs for allowed crate registries. Defaults to the crates.io index # if not specified. If it is specified but empty, no registries are allowed. allow-registry = ["https://github.com/rust-lang/crates.io-index"] # List of URLs for allowed Git repositories allow-git = [] [sources.allow-org] # 1 or more github.com organizations to allow git sources for github = [""] # 1 or more gitlab.com organizations to allow git sources for gitlab = [""] # 1 or more bitbucket.org organizations to allow git sources for bitbucket = [""] ================================================ FILE: docker-compose.yaml ================================================ version: "3.7" services: app: image: ghcr.io/wedontpanic/jotoba:latest restart: always ports: - 8080:8080 working_dir: /app volumes: - ./html:/app/html - ./resources:/app/resources - ./data:/app/data ================================================ FILE: html/assets/css/main.css ================================================ /* ----------------- Color Themes ----------------- */ :root, :root.light { --background: #f2f1f0; --overlay: #f3f3f3; --primaryColor: #34a83c; --bgPrimaryColor: #50c058; --secondaryColor: #909dc0; --primaryTextColor: #222222; --secondaryTextColor: #ffffff; --searchBackground: #ffffff; --searchTextColor: #555555; --shadowColor: #222222; --tagColor: #808080; --itemBG: #d3d3d3; --alert: #ff4254; --danger: #dc3545; --danger2: #dd4c5b; /* Special */ --itemBG_075: rgb(211, 211, 211, 0.75); --langSep: rgba(50, 103, 51, 0.1); --lineColor: rgba(0, 0, 0, 0.1); --backgroundShadow: rgba(34, 34, 
34, 0.1); /* Used by Radical Picker */ --borderColor: var(--searchTextColor); --disabledColor: #bdbdbd; /* Used by overlays */ --headerColor: var(--borderColor); --headerScrollBar: var(--borderColor); /* Overlay Button */ --buttonText: #1f1f1f; --buttonBg: #dedede; --buttonBgActive: #e6e6e6; /* Overlay Graph */ --graphLink: #d1d1d1; --graphCircle: var(--bgPrimaryColor); --graphStroke: rgb(116 116 116 / 6%); --graphPath: white; --graphText: white; } :root.dark { --background: #202324; --overlay: #1f2123; --primaryColor: #2d9034; --bgPrimaryColor: #338f4f; --secondaryColor: #435993; --primaryTextColor: #d3cfc9; --secondaryTextColor: #e8e6e3; --searchBackground: #181a1b; --searchTextColor: #b2aca2; --shadowColor: #9d9488; --tagColor: #787878; --itemBG: #7a7a7a; --itemBG_075: rgba(122, 122, 122, 0.75); --lineColor: rgba(211, 207, 201, 0.1); --backgroundShadow: rgba(34, 34, 34, 0.2); --borderColor: var(--itemBG_075); --disabledColor: #3c3c3c; --headerColor: var(--lineColor); --headerScrollBar: #434344; --buttonText: #fff; --buttonBg: #404040; --buttonBgActive: #515151; --alert: #e93849; } :root.dark ::-moz-selection { background: var(--secondaryColor); } :root.dark ::selection { background: var(--secondaryColor); } /* ------------------- Scrollbar Adjustments ------------------- */ * { scrollbar-width: thin; } ::-webkit-scrollbar { width: 20px; } ::-webkit-scrollbar-track { background-color: transparent; } ::-webkit-scrollbar-thumb { background-color: #c1c1c1; border-radius: 20px; border: 6px solid transparent; background-clip: content-box; } ::-webkit-scrollbar-thumb:hover { background-color: #a8a8a8; } :root.dark ::-webkit-scrollbar-thumb { background-color: var(--itemBG); } /* ----------------- Overall Page Adjustments ----------------- */ html, body { font-size: 100%; background: var(--background) !important; } body { color: var(--primaryTextColor); cursor: auto; font-family: "Helvetica Neue", Helvetica, Arial, "Source Han Sans", "源ノ角ゴシック", "Hiragino Sans", 
"HiraKakuProN-W3", "Hiragino Kaku Gothic ProN W3", "Hiragino Kaku Gothic ProN", "ヒラギノ角ゴ ProN W3", "Noto Sans", "Noto Sans JP", "Noto Sans CJK JP", "メイリオ", Meiryo, "游ゴシック", YuGothic, "MS Pゴシック", "MS PGothic", "MS ゴシック", "MS Gothic", sans-serif; font-style: normal; font-weight: normal; line-height: 1.5; margin: 0; padding: 0; position: relative; -webkit-font-smoothing: auto; } h3, h4 { font-family: "Helvetica", "Arial", sans-serif; } body { min-height: 100vh; height: 100vh; } body.index { display: grid; grid-template-rows: 1fr auto; overflow-x: hidden; } .noselect, .tags, .clickable, .entry-count, .no-drag { -webkit-touch-callout: none; /* iOS Safari */ -webkit-user-select: none; /* Safari */ /* Konqueror HTML */ -moz-user-select: none; /* Old versions of Firefox */ -ms-user-select: none; /* Internet Explorer/Edge */ -o-user-select: none; /* Opera */ user-select: none; /* Non-prefixed version, currently supported by Chrome, Edge, Opera and Firefox */ } #backdrop { position: fixed; display: flex; flex-direction: column; align-items: center; justify-content: center; cursor: pointer; z-index: 999999; top: 0; left: 0; width: 100vw; height: 100vh; background-color: var(--backgroundShadow); } button { display: flex; place-content: center; place-items: center; } .btn-danger { color: white !important; background-color: var(--danger) !important; border-color: var(--danger) !important; } .btn-danger:not(:disabled):not(.disabled).active:focus, .btn-danger:not(:disabled):not(.disabled):active:focus { box-shadow: unset; } .btn-danger:hover { background-color: var(--danger2) !important; } .close:focus { outline: 0; } object { pointer-events: none; } .vl { border-left: 1px solid var(--searchTextColor); } :root.dark hr { border-top: 1px solid rgba(255,255,255,.1); } h3 { font-size: 22px; text-align: center; text-align: -webkit-center; font-weight: bold; } h4 { font-size: 11px; color: var(--searchTextColor); margin: 2px 0 0 0; } .hidden { display: none !important; } .highlight { 
color: var(--primaryColor); } .indented { margin-left: 5%; } .clickable { color: var(--primaryColor); text-align: center; text-align: -webkit-center; cursor: pointer; } .clickable.title { font-size: 22px; font-weight: bold; } .clickable.fat { font-size: 20px; } .clickable:hover { text-decoration: underline; color: var(--primaryColor); } .no-margin { margin: 0px 0px 0px 0px !important; } .no-align { text-align: unset; } .text-left { text-align: left; } .top-padding-05-rem { padding-top: 0.5rem; } .right-padding-10 { padding-right: 10px; } .right-padding-20 { padding-right: 20px; } .d-flex.wrap { flex-wrap: wrap; } .no-highlight { color: var(--primaryTextColor); } .no-highlight:hover { text-decoration: none; } a:hover { color: unset; text-decoration: unset !important; } .black { color: var(--primaryTextColor); } .fat { font-weight: bold; } .center-text { text-align: center; text-align: -webkit-center; } /* ----------------- Commonly Used CSS ----------------- */ .search-suggestion { color: inherit; } .search-suggestion:focus, .search-suggestion:link, .search-suggestion:visited, .search-suggestion:hover { text-decoration: none; } #page-container { padding-top: 10px; padding-left: 10px; padding-right: 10px; } .main-container { width: 100%; max-width: 1145px; height: -webkit-max-content; height: -moz-max-content; height: max-content; } .main-info { width: 100%; height: -webkit-max-content; height: -moz-max-content; height: max-content; padding-bottom: 10px; } .secondary-info { height: -webkit-max-content; height: -moz-max-content; height: max-content; width: 35%; padding-bottom: 10px; padding-left: 10px; } @media only screen and (max-width: 600px) { .secondary-info { padding-left: 0px; } } .tags { color: var(--tagColor); font-size: 12px; margin-top: 6px; } .tags.fat { font-size: 20px; font-weight: bold; color: var(--primaryTextColor); } .tags.slim { font-size: 20px; font-weight: 400; color: var(--primaryTextColor); margin-top: -5px !important; } .tags.no-margin { 
margin-top: 0px; }
.d-flex .row-tag-entry + .row-tag-entry { padding-left: 10px; }
.entry-count { color: var(--tagColor); line-height: 30px; margin-right: 5px; position: relative; height: 100%; }
/* --- Slider adjustments --- */
.slider-parent { padding-top: 30px; padding-right: 20px; width: 150px; }
.slider-output { font-size: 13px; padding-top: 10px; color: var(--primaryColor); }
/* The slider itself */
.slider { -webkit-appearance: none; width: 130px; height: 15px; border-radius: 10px; background: var(--itemBG); outline: none; opacity: 0.7; transition: opacity 0.2s; }
/* Mouse-over effects */
.slider:hover { opacity: 1; }
:root.dark .slider:hover { opacity: 0.8; }
/* The slider handle for webkit and mozilla with its extra shit */
.slider::-webkit-slider-thumb { -webkit-appearance: none; appearance: none; width: 25px; height: 25px; border-radius: 50%; border-color: var(--bgPrimaryColor); background: var(--bgPrimaryColor); cursor: pointer; }
:root.dark .slider::-webkit-slider-thumb { border-color: #3ace67; background: #3ace67; }
.slider::-webkit-slider-thumb:hover { background-color: var(--primaryColor); }
:root.dark .slider::-webkit-slider-thumb:hover { border-color: #2eeb67; }
.slider::-moz-range-thumb { width: 25px; height: 25px; border-radius: 50%; background: var(--bgPrimaryColor); cursor: pointer; }
/* Fix: dark-theme base (non-hover) color, mirroring :root.dark .slider::-webkit-slider-thumb above.
   Previously this selector carried a stray :hover, duplicating the border-color hover rule below
   and leaving the dark-mode Firefox thumb without its #3ace67 resting color. */
:root.dark .slider::-moz-range-thumb { background: #3ace67; }
.slider::-moz-range-thumb:hover { background-color: var(--primaryColor); }
:root.dark .slider::-moz-range-thumb:hover { border-color: #2eeb67; }
.res-separator { border-top: 2px solid var(--lineColor); margin-right: 5px; }
.res-separator.sentence { width: 100%; }
/* Useful stuff */
.flex-center { display: flex; place-content: center; }
#loading-screen { visibility: hidden; background-color: #000; opacity: 0; transition: opacity 0.15s linear; z-index: 2000; position: fixed; top: 0; left: 0; width: 100vw; height: 100vh; display: flex; justify-content: center; align-items: center; }
#loading-screen.show { display: block; opacity: 0.5; } .loading-animation { border: 16px solid var(--itemBG); border-radius: 50%; border-top: 16px solid var(--primaryColor); width: 100px; height: 100px; -webkit-animation: spin 2s linear infinite; /* Safari */ animation: spin 2s linear infinite; } /* Safari */ @-webkit-keyframes spin { 0% { -webkit-transform: rotate(0deg); } 100% { -webkit-transform: rotate(360deg); } } @keyframes spin { 0% { transform: rotate(0deg); } 100% { transform: rotate(360deg); } } /* SVG Colors */ :root.dark .mobile-nav-btn > div, :root.dark .mobile-nav-inner-btn > div:not(.jumpSvg) { background-color: var(--searchTextColor) !important; color: var(--searchTextColor) !important; } :root.dark .mobile-nav-inner-btn > span { color: var(--searchTextColor) !important; } .searchSvg, .settingsSvg, .clearSvg, .voiceSvg { mask-size: cover !important; -webkit-mask-size: cover !important; } .searchSvg { height: 18px; width: 18px; background-color: var(--primaryColor); mask: url("/assets/svg/ui/search.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/search.svg") no-repeat center; } .searchSvg.index { height: 16px; width: 16px; margin-top: 3px; margin-left: 5px; background-color: var(--secondaryTextColor); } .settingsSvg { height: 30px; width: 30px; background-color: var(--tagColor); mask: url("/assets/svg/ui/settings.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/settings.svg") no-repeat center; } .infoSvg { scale: 1.1; height: 30px; width: 30px; background-color: var(--tagColor); mask: url("/assets/svg/ui/info.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/info.svg") no-repeat center; } .notificationSvg { scale: 1.1; height: 30px; width: 30px; background-color: var(--tagColor); mask: url("/assets/svg/ui/notification.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/notification.svg") no-repeat center; } .settingsSvg.mobile { height: 26px; width: 26px; } .clearSvg { height: 20px; width: 20px; margin-top: 2px; 
background-color: var(--tagColor); mask: url("/assets/svg/ui/clear.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/clear.svg") no-repeat center; } .voiceSvg { margin-top: 2px; height: 24px; width: 24px; background-color: var(--tagColor); mask: url("/assets/svg/ui/voice.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/voice.svg") no-repeat center; } .voiceSvg.mobile { height: 30px; width: 30px; background-color: var(--tagColor); } .voiceSvg.index { margin-top: -7px; height: 30px; width: 30px; } .voiceSvg.active { background-color: var(--primaryColor) !important; } .cameraSvg { height: 28px; width: 28px; background-color: var(--tagColor); mask: url("/assets/svg/ui/camera.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/camera.svg") no-repeat center; } .cameraSvg.index { margin-top: -5px; margin-right: 33px; } .jumpSvg { margin-left: -1px; height: 26px; width: 26px; background-color: var(--primaryColor) !important; mask: url("/assets/svg/ui/jump.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/jump.svg") no-repeat center; } .menuSvg { height: 28px; width: 28px; background-color: var(--tagColor); mask: url("/assets/svg/ui/menu.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/menu.svg") no-repeat center; } .undoSvg { height: 20px; width: 20px; background-color: var(--tagColor); mask: url("/assets/svg/ui/undo.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/undo.svg") no-repeat center; cursor: pointer; } .imgUploadSvg { margin: 11px 14px 0px -35px; height: 20px; width: 20px; background-color: var(--tagColor); mask: url("/assets/svg/ui/upload.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/upload.svg") no-repeat center; cursor: pointer; } .downloadSvg { height: 20px; width: 20px; background-color: var(--tagColor); mask: url("/assets/svg/ui/download.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/download.svg") no-repeat center; cursor: pointer; pointer-events: none; } .conjugationSvg { height: 
20px; width: 20px; background-color: var(--tagColor); mask: url("/assets/svg/ui/conjugation.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/conjugation.svg") no-repeat center; cursor: pointer; pointer-events: none; } .sentenceSvg { height: 20px; width: 20px; background-color: var(--tagColor); mask: url("/assets/svg/ui/sentence.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/sentence.svg") no-repeat center; cursor: pointer; pointer-events: none; } .transitivitySvg { height: 20px; width: 20px; background-color: var(--tagColor); mask: url("/assets/svg/ui/transitivity.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/transitivity.svg") no-repeat center; cursor: pointer; pointer-events: none; } .linkSvg { height: 20px; width: 20px; background-color: var(--tagColor); mask: url("/assets/svg/ui/link.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/link.svg") no-repeat center; cursor: pointer; pointer-events: none; } .copySvg { height: 21px; width: 21px; background-color: var(--primaryColor); mask: url("/assets/svg/ui/copy.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/copy.svg") no-repeat center; cursor: pointer; pointer-events: none; } .tooltipSvg { height: 20px; width: 20px; background-color: var(--searchTextColor); mask: url("/assets/svg/ui/3dot.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/3dot.svg") no-repeat center; cursor: pointer; } .shareSvg { height: 20px; width: 20px; background-color: var(--disabledColor); mask: url("/assets/svg/ui/share.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/share.svg") no-repeat center; cursor: pointer; } .discordSvg { height: 35px; width: 35px; background-color: var(--tagColor); mask: url("/assets/svg/ui/_discord.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/_discord.svg") no-repeat center; cursor: pointer; } .githubSvg { height: 35px; width: 35px; background-color: var(--tagColor); mask: url("/assets/svg/ui/_github.svg") no-repeat center; 
-webkit-mask: url("/assets/svg/ui/_github.svg") no-repeat center; cursor: pointer; } .donationSvg { height: 35px; width: 35px; margin-top: -1px; background-color: var(--tagColor); mask: url("/assets/svg/ui/_donation.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/_donation.svg") no-repeat center; cursor: pointer; } ================================================ FILE: html/assets/css/mobile.css ================================================ /* Used to make mobile stuff less ugly. Will be transfered into the others file bit-by-bit. */ @media only screen and (max-width: 600px) { .modal-open { margin-right: 0px; } /* -------- Index -------- */ .circle { width: 1.25em; height: 1.25em; border-radius: 50%; font-size: 50px; text-align: center; text-align: -webkit-center; background-color: var(--itemBG_075); line-height: 110%; position: fixed; bottom: .5em; right: .5em; } article { padding-left: 0.5em; padding-right: 1em; } .voiceSvg.index { margin-top: -4px; } /* -------- Word Search -------- */ .search-embedded-btn { right: 5px; margin-top: 9px; } .search-embedded-btn.search { right: 7px; margin-top: 10px; } .search-embedded-btn.radical { display: none; } #voiceBtn > svg { margin-top: 3px; } #search-vl { position: absolute; right: 39px; } #emptyInput { right: 31px; margin-top: 7px; } .choices__item--selectable.selected { color: var(--primaryColor) !important; } .form-main > div > div > div > div .choices__list.choices__list--dropdown { width: -webkit-max-content; width: -moz-max-content; width: max-content; } .searchDivInner form .inner-form .input-field.first-wrap .choices__list.choices__list--dropdown { padding-right: 20px; } .overlay.suggestion { width: 98%; margin-left: 1%; } .image-search-input { width: 100%; } #shadow-text { padding: 0px; margin: 13px 15px; } #content-container { flex-direction: column!important; min-width: 350px; } body { height: -webkit-max-content; height: -moz-max-content; height: max-content; } .searchDivInner form .inner-form 
.input-field.first-wrap .choices__inner .choices__list--single .choices__item { display: none; }
.searchDivInner form .inner-form .input-field.first-wrap { width: 40px; }
/* Fix: custom properties are case-sensitive; var(--searchTextcolor) did not resolve
   (the variable is declared as --searchTextColor in main.css), leaving the mobile
   search input with the inherited text color instead of the themed one. */
.searchDivInner form .inner-form .input-field input { height: 100%; background: transparent; border: 0; display: block; width: 88%; padding: 25px 0px 27px 15px; font-size: 1em; color: var(--searchTextColor); }
#searchDiv { margin: 0px; width: 100%; max-width: 1000px; }
h3 { margin-bottom: 2rem; }
.title-div { width: 100%; }
.btn-container, .main-tab-select { display: none !important; }
.title-div { padding-top: 10px; width: 100%; }
.main-info { height: -webkit-max-content; height: -moz-max-content; height: max-content; width: 100%; }
.mdl-menu__container { margin-right: 15px; }
.main-info > .d-flex.center { flex-direction: column!important; align-self: center; align-items: center; width: 100%; }
.main-info > .d-flex.flex-row { align-self: center; align-items: center; margin-left: -10px; }
.definition-wrapper.d-flex.flex-row { padding-bottom: 5px; }
.secondary-info { height: -webkit-max-content; height: -moz-max-content; height: max-content; align-self: center; width: 100%; /* padding-left: 15%; */ }
.secondary-info > div > .kanji-entry { justify-content: center; }
.kanji-entry.left.fixed > .d-flex { padding-bottom: 5px; }
.kanji-entry.right { padding-left: 2vw; padding-right: 2vw; width: 100%; }
.kanji-preview.large { position: absolute; }
.translation.kanji { padding-top: 70px !important; padding-left: 0px !important; padding-right: 0px !important; text-align: center; text-align: -webkit-center; }
.kanji-parent > .tags { padding-bottom: 5px; text-align: center; text-align: -webkit-center; }
.list-entry > .tags, .kanji-entry.right, .tags.no-margin { text-align: left; }
.entry-min-height-1 { min-height: unset; }
.entry-min-height-2 { min-height: unset; }
/* ----------- Overlay ----------- */
.clickable.collocation { width: 70%; margin-left: 20%; }
.table.conjugation { margin-left: 0px; width: 101%;
} /* -------- Radical Picker -------- */ .rad-picker-icon { font-size: 30px; } /* -------- Image Upload -------- */ .cropping-target-border { width: calc(100% - 30px); left: 15px; } .croppie-container { width: calc(100% - 60px); left: 30px; } .croppie-container .cr-boundary { height: 485px; overflow: scroll; } /* -------- Help Page --------- */ .help-joto { display: none; } /* -------- Mobile only -------- */ .mobile-nav { overflow-y: hidden; position: fixed; bottom: 75px; right: 24px; z-index: 100; height: 50%; width: 60px; } .mobile-nav-btn { place-content: center; place-items: center; display: flex; position: fixed; bottom: 20px; right: 30px; z-index: 99; border: none; outline: none; background-color: var(--itemBG_075); border-radius: 50%; width: 50px; height: 50px; text-align: center; text-align: -webkit-center; } .mobile-nav-inner-btn { width: 45px; height: 45px; border: none; outline: none; background: var(--itemBG_075); border-radius: 50%; margin-bottom: 5px; margin-left: 6px; transition: all .2s linear; } .mobile-nav.hidden > .mobile-nav-inner-btn { margin-bottom: -50px; } #jmp-btn { padding-bottom: 5px; transform: rotate(180deg); } #jmp-btn > svg > polygon { fill: var(--primaryColor); } .kanji-jump { text-align: center; text-align: -webkit-center; margin-bottom: 20px; } .kanji-jump.parent { text-align: center; text-align: -webkit-center; width: 100%; } .word-title { display: none; } .info-h3 { margin-bottom: 0px; } /* Radical Picker */ .rad-result-preview { display: none; } .mobile-nav-btn:focus, .mobile-nav-inner-btn:focus { outline: none; box-shadow: 0 0 3pt 2pt var(--primaryColor); } } /* Mobile only */ @media only screen and (min-width: 600px) { .circle, .mobile-nav, .mobile-nav-btn, .mobile-nav-inner-btn, .kanji-jump, .desktop-br { display: none; } } ================================================ FILE: html/assets/css/overlay/croppingOverlay.css ================================================ .modal.fade .modal-dialog.crop { transition: unset; 
transform: unset; } .cropping-target-border { position: fixed; width: 52vw; height: 596px; background: var(--itemBG); left: 25%; top: 25px; } .croppie-container { position: fixed; width: 50%; height: 500px; left: 26%; top: 50px } .croppie-container .cr-boundary { height: 525px; } .btn-search.crop { width: 110px; height: 30px; display: inline; position: absolute; right: 25px; top: 93.75%; background: var(--bgPrimaryColor); } .btn-danger.crop { left: 25px; } .croppie-container .cr-slider-wrap { margin: 10px auto; } .cr-slider { padding: 0; } .cr-slider::-webkit-slider-thumb { -webkit-appearance: none; appearance: none; margin-top: -9px; width: 25px; height: 25px; background: var(--primaryColor); cursor: pointer; } .cr-slider::-moz-range-thumb { width: 25px; height: 25px; margin-top: -9px; background: var(--primaryColor); cursor: pointer; } .cr-slider::-webkit-slider-runnable-track{ width:100%; height:7px; background:rgba(0,0,0,.5); border:0; border-radius:3px } .cr-slider::-moz-range-track{ width:100%; height:7px; background:rgba(0,0,0,.5); border:0; border-radius:3px } ================================================ FILE: html/assets/css/overlay/footerOverlay.css ================================================ .cookie-footer { display: block; position: fixed; bottom: 5%; padding: 10px; background: var(--overlay); border: 2px solid var( --tagColor); border-radius: 10px; z-index: 100; max-width: -webkit-max-content; max-width: -moz-max-content; max-width: max-content; width: inherit; width: 90%; left: 50%; transform: translate(-50%, 0); } .cookie-footer > .res-separator { margin-top: 5px; margin-bottom: 5px; } .cookie-btn { margin-top: 10px; margin-right: 10px; } .joto-cookie { width: 85px; margin-right: 25px; } /* Mobile adjustments */ @media only screen and (max-width: 600px) { .cookie-footer span, .cookie-btn { font-size: 12px; } .joto-cookie { width: 125px; margin-right: 25px; } } ================================================ FILE: 
html/assets/css/overlay/imgUploadOverlay.css ================================================ .image-search-input { width: 50%; padding: 0px 39px 0px 8px; margin-top: 4px; display: block; border: 1px solid var(--lineColor); background-color: var(--searchBackground); } .image-search-input.disabled { cursor: not-allowed; background: var(--backgroundShadow); } .image-search-input:focus { outline: none; } .image-search-upload { opacity: 0; margin-left: -40px; margin-right: 10px; width: 30px; margin-top: 5px; cursor: pointer; } .image-search-upload-btn { border: 1px solid var(--lineColor); background: var(--overlay); color: var(--primaryTextColor); padding: 5px; font-size: small; margin-top: 4px; } ================================================ FILE: html/assets/css/overlay/notificationOverlay.css ================================================ #notifications-container { position: absolute; top: 60px; right: 55px; width: 16rem; z-index: 10; overflow: hidden; border-radius: .5rem; background-color: var(--overlay); color: var(--primaryTextColor); box-shadow: 0px 8px 20px 0px var(--backgroundShadow); } .notifications-info-container { display: flex; flex-flow: column; width: 100%; padding: 1rem; overflow: hidden; } .notification-title { padding: .5rem; margin: -1rem -1rem .5rem; background-color: var(--bgPrimaryColor); background: linear-gradient(45deg,var(--bgPrimaryColor),var(--primaryColor)); color: var(--secondaryTextColor); text-align: center; text-transform: uppercase; font-size: large; } :root.dark .notification-title { background: var(--bgPrimaryColor); } #notification-content { margin: .5rem 0; opacity: .85; font-weight: 400; } #no-result { padding-bottom: 10px; text-align: center; } .notification-entry { border-bottom: 1px solid var(--borderColor); padding: 5px; cursor: pointer; } .notification-entry:not(#no-result):hover { background: rgba(0, 0, 0, 0.02); } :root.dark .notification-entry:not(#no-result):hover { background: rgba(255, 255, 255, 0.02); } 
.entry-title { font-weight: bold; font-size: 16px; text-overflow: ellipsis; overflow: hidden; max-width: 60%; } .date-tag { position: absolute; right: 25px; margin-top: -20px; font-size: 10px; } .content > li { line-height: 15px; } .button-container { display: flex; grid-gap: .5rem; gap: .5rem; margin-top: 10px; margin-bottom: -10px; justify-content: flex-end; } #notificationModal h1 { font-size: 1.5rem; } #notificationModal h2 { font-size: 1.25rem; } ================================================ FILE: html/assets/css/overlay/overlayBase.css ================================================ .overlay { z-index: 1; margin-top: 2px; position: absolute; width: 100%; height: -webkit-max-content; height: -moz-max-content; height: max-content; border: 2px solid var(--searchBackground); border-radius: 20px; border-top: none; background-color: var(--searchBackground); box-shadow: 0px 8px 20px 0px var(--backgroundShadow) } .overlay > * { z-index: 2; } .overlay > .flex-column { margin: 5px 10px; } .x-button { position: absolute; right: 10px; font-size: larger; } .x-button:hover { cursor: pointer; } .modal-header { border-bottom-color: var(--lineColor); } :root.dark .modal-header > .close { color: #fff; } :root.dark .modal-header > .close:hover { color: #97a495; } .modal-footer { border-top-color: var(--lineColor); } .modal-content { background-color: var(--overlay); } #default_lang_settings { margin-top: 0.25rem !important; } .form-control.small { width: 100%; height: 20px; padding: 0; padding-left: 1%; display: inline; color: var(--searchTextColor); background: var(--searchBackground); border-color: var(--lineColor); } .form-control.small:focus { box-shadow: unset; } .overlay-button { position: relative; display: inline-flex; flex: auto 0 0; justify-content: center; align-items: center; grid-gap: 4px; gap: 4px; padding: 0 0.625rem; margin: 0; border: none; height: 2rem; font-size: 1rem; border-radius: 4px; cursor: pointer; } .overlay-button { color: var(--buttonText); 
background-color: var(--buttonBg); flex-grow: 1; outline: 0px solid var(--primaryColor); transition: all .05s ease; } .overlay-button:hover, .overlay-button:active { background-color: var(--buttonBgActive); } .overlay-button:active { outline: 2px solid var(--primaryColor); } ================================================ FILE: html/assets/css/overlay/radicalOverlay.css ================================================ /* Used by the radical picker only. */ .rad-results { padding-left: 5px; padding-top: 10px; min-height: 90px; max-height: 25vh; overflow-y: scroll; } .rad-suggestion-wrapper { width: 100%; border-top: 1px solid var(--borderColor); margin-top: 10px; overflow-x: scroll; scrollbar-width: none; background: var(--itemBG_075); } .rad-suggestion-wrapper::-webkit-scrollbar { width: 0; height: 0; } #suggestion-container-rad { overflow-y: auto; width: -webkit-max-content; width: -moz-max-content; width: max-content; } #suggestion-container-rad > .search-suggestion:first-child { padding-top: 0px !important; } #suggestion-container-rad > .search-suggestion { width: -webkit-max-content !important; width: -moz-max-content !important; width: max-content !important; padding-bottom: 0px !important; padding-left: 10px !important; padding-right: 10px !important; } .rad-page-footer { height: 50px; border-top: 1px solid var(--borderColor); } .overlay.radical > .clickable { padding-left: 5px; } .overlay.radical > .x-button { margin: 1px 5px 5px 5px; } .rad-page-toggle { display: flex; flex-direction: row; height: 28px; margin-top: 10px; margin-bottom: -2px; cursor: default; } .rad-page-toggle:hover { cursor: pointer; } .rad-page-toggle > span { margin-left: 5px; margin-right: 5px; padding-bottom: 1px; font-size: 18px; width: 45px; text-align: center; cursor: pointer; } #r-tc { display: none; text-align: center; text-align: -webkit-center; } #r-tc.show { display: block; } #r-tc > .searchSvg { background-color: var(--searchTextColor); width: 15px; height: 15px; margin-top: 
4px; margin-left: auto; margin-right: auto; } .rad-page-toggle > span:first-child { margin-left: 20px; } .rad-page-toggle > span.disabled { color: var(--disabledColor); } .rad-page-toggle > span.highlighted { color: var(--primaryColor); } .rad-page-toggle span.selected { color: var(--primaryColor); border-color: var(--primaryColor); border: 2px solid var(--borderColor); border-bottom: unset; background: var(--background); border-radius: 6px 6px 0px 0px; } .rad-kanji-wrapper { border: 2px solid var(--borderColor); margin: 0px -2px -2px -2px; border-radius: 15px; overflow-x: auto; } .rad-kanji-title { font-size: 17px; color: var(--searchTextColor); margin: 5px; } .rad-wrapper { background-color: var(--background); } .kanji-wrapper { background-color: var(--background); margin-top: 10px; margin-bottom: -10px; border-top: 1px solid var(--borderColor); border-top-width: 1px; width: 100%; } .rad-picker { overflow-y: scroll; margin-bottom: -10px; padding: 5px; height: 86px; scrollbar-width: none; } .rad-picker::-webkit-scrollbar { width: 0px; } .rad-btn { display: inline-block; border-radius: 2px; font-size: 24px; text-align: center; text-align: -webkit-center; margin: 1px; height: 36px; width: 36px; padding: 2px 4px; line-height: 1.4; } .rad-btn.picker.selected { color: var(--secondaryTextColor); background-color: var(--bgPrimaryColor); border-radius: 5px; } .rad-btn.picker.disabled { color: var(--lineColor); } .rad-btn.picker.disabled:hover { cursor: unset; } .rad-btn.picker { background-color: none; } .rad-btn.num { color: var(--secondaryTextColor); background: none; min-width: 32px; line-height: 36px; font-weight: bold; font-size: 20px; padding: 0 5px; } .rad-btn.num { background-color: var(--borderColor); } .rad-btn:hover:not(.num) { cursor: pointer; } .kanji-search-wrapper > .searchSvg{ display: inline-block; margin: 15px 0px 0px 18px; width: 20px; height: 20px; background-color: var(--tagColor); } .kanji-search-wrapper > .btn-search{ position: absolute; height: 
30px; width: 100px; right: 20px; margin-top: -29px; border-radius: 15px; } #kanji-search { background-color: var(--searchBackground); border: none; border-bottom: 2px solid var(--backgroundShadow); color: var(--searchTextColor); position: absolute; padding-left: 40px; margin-top: 8px; margin-left: 10px; height: 35px; width: 70%; max-width: calc(100% - 175px); } .undoSvg { position: absolute; scale: 1.2; left: calc(70% + 20px); margin-top: 18px; } #kanji-search:focus-visible, #kanji-search:focus { outline: unset; border-bottom: 2px solid var(--tagColor); } /* ---------- Scrollbar changes ---------- */ /* Firefox */ .rad-results, .rad-picker, .overlay.radical, .rad-page-toggle { scrollbar-width: none; } /* Literally everyone else */ .rad-results::-webkit-scrollbar, .rad-picker::-webkit-scrollbar, .overlay.radical::-webkit-scrollbar, .rad-page-toggle::-webkit-scrollbar { width: 0px; height: 0px; } /* Mobile Adjustments */ @media only screen and (max-width: 600px) { body { min-height: 650px; } .rad-results { max-height: 20vh !important; } .rad-page-toggle { height: 30px; overflow-x: auto; white-space: nowrap; max-width: 90%; } .rad-page-toggle > span:first-child { margin-left: 10px; margin-right: 0px; } .rad-page-toggle > span:not(:first-child) { width: -webkit-max-content; width: -moz-max-content; width: max-content; padding: 0px 2px 0px 2px; } .rad-page-toggle > span { margin-left: 2%; margin-right: 2%; } .undoSvg { scale: 1; right: 132px; left: unset; margin-top: 16px; } } ================================================ FILE: html/assets/css/overlay/settingsOverlay.css ================================================ #settingsModal .modal-body { height: 600px; margin: 0; padding: 0; } #settingsModal .choices__list--dropdown .choices__list { max-height: 350px; overflow-y: scroll; } #settingsModal .close { margin: -1rem -1rem -1rem auto; color: var(--secondaryTextColor); } #settingsModal .close:hover { color: #a7a7a7; } #settingsModal .choices:after { right: 15px; 
margin-left: unset; } #show_anim_speed_settings_slider { font-size: 14px; } .mdl-layout__header-row { padding: 0 40px 0 25px !important; } .mdl-layout__tab { padding: 0 24px !important; } .mdl-layout__tab-bar { width: calc(100% - 56px); } .mdl-layout__header, .mdl-layout__tab-bar { background-color: var(--headerColor); } .mdl-layout__tab-bar-button { background-color: var(--headerScrollBar); } .mdl-layout.is-upgraded .mdl-layout__tab.is-active::after { background-color: var(--bgPrimaryColor); } .page-content { margin: 15px; } .settings-entry { display: flex; flex-direction: row; } .settings-entry.ex { font-size: 0.8em; } .settings-entry.txt-input { margin-top: 5px !important; margin-bottom: -15px !important; } .settings-entry:not(:first-child, .no-gap) { margin-top: 10px; } .inner-header { font-weight: bold; text-decoration: underline; } .inner-header:first-child { margin-bottom: 5px; } .inner-title { width: 60%; font-size: 15px; align-self: center; } .inner-header:not(:first-child) { margin-bottom: -5px; margin-top: 10px; } .inner-title.display { width: 72%; } .inner-title.txt-input { margin-top: -10px; } .settings-entry.sub > .inner-title { width: 55%; margin-left: 5%; } .settings-entry.sub > .inner-title::before { content: "↪"; margin-left: -5px; margin-right: 5px; } .mdl-checkbox { width: 0%; } .mdl-textfield.mdl-js-textfield.is-upgraded { width: 65%; margin: -20px 0; padding-right: 65px; } .mdl-textfield__input { text-align: center; } .mdl-textfield.is-focused .mdl-textfield__label:after { width: calc(100% - 65px); } .mdl-textfield__error { width: 150%; margin-left: -50px; } :root.dark .mdl-textfield__label { color: var(--borderColor); } .mdl-textfield__label:after { background-color: var(--bgPrimaryColor); } #show_anim_speed_settings { margin-top: 10px; } .mdl-textfield__label:after { left: 0px; } .mdl-checkbox.is-checked .mdl-checkbox__box-outline { border-color: var(--primaryColor); } .mdl-checkbox.is-checked .mdl-checkbox__tick-outline { background-color: 
var(--primaryColor); } :root.dark .mdl-checkbox__box-outline { border-color: var(--borderColor); } :root.dark .mdl-textfield__input { border-color: var(--itemBG); } @media only screen and (max-width: 600px) { .inner-title { width: 70%; } .settings-entry.sub > .inner-title { width: 65%; } .inner-title.big { width: 80%; } .inner-title.display { width: 76%; } .mdl-textfield.mdl-js-textfield { padding-right: 0px !important; left: 7px !important; } .mdl-textfield.is-focused .mdl-textfield__label:after { width: 100%; } .mdl-textfield__label:after { left: 45%; } .slidercontainer.settings { text-align: center; } } ================================================ FILE: html/assets/css/overlay/suggestionOverlay.css ================================================ #search::-moz-selection { color: var(--primaryTextColor); background-color: var(--secondaryColor); } #search::selection { color: var(--primaryTextColor); background-color: var(--secondaryColor); } .overlay.suggestion { border: unset; width: 76%; margin-left: 17.5%; } .search-suggestion { padding-left: 30px; padding-bottom: 5px; font-size: 16px; cursor: pointer; } .search-suggestion:first-child { padding-top: 5px; } .search-suggestion.selected, .search-suggestion:hover { background: var(--lineColor); font-weight: bold; } .secondary-suggestion { color: var(--tagColor); position: absolute; font-size: 14px; padding-left: 10px; margin-top: 1px; } #shadow-text { position: absolute; pointer-events: none; height: 100%; background: transparent; border: 0; display: block; width: 88%; padding: 13px 32px; font-size: 16px; color: var(--searchTextColor); z-index: 4; opacity: 0.4; overflow: hidden; } ================================================ FILE: html/assets/css/page/aboutPage.css ================================================ article { padding-left: 10px; } .about-title { font-size: 22px; font-weight: bold; color: var(--primaryColor); } .joto-wizard { width: 60px; position: relative; margin-top: -37px; margin-left: 
10px; } ================================================ FILE: html/assets/css/page/errorPage.css ================================================ body { background: #f2f1f0; color: #222222; font-family: 'Open Sans', sans-serif; max-height: 700px; overflow: hidden; } .err-parent { text-align: center; text-align: -webkit-center; display: block; position: relative; width: 80%; margin: 100px auto; } .err-parent > *:not(:first-child) { margin-bottom: 10px; } .err-code { font-size: 220px; position: relative; display: inline-block; z-index: 2; height: 250px; letter-spacing: 15px; } .txt-primary { text-align: center; text-align: -webkit-center; display: block; position: relative; padding-bottom: 5px; letter-spacing: 8px; font-size: 3em; line-height: 80%; text-transform: uppercase; } .txt-secondary { text-align: center; text-align: -webkit-center; display: block; position: relative; font-size: 20px; text-transform: uppercase; padding-bottom: 5px; } .back-btn { background-color: #50c058; position: relative; display: inline-block; width: 358px; padding: 5px; z-index: 5; font-size: 25px; margin: 0 auto; color: white; text-decoration: none; } .issue-btn { position: relative; display: inline-block; width: 308px; padding: 5px; z-index: 5; font-size: 25px; margin: 0 auto; color: white; text-decoration: none; } .git-logo { fill: black; } a:hover { text-decoration: none; color: white; } hr { padding: 0; border: none; border-top: 5px solid #50c058; color: #50c058; text-align: center; text-align: -webkit-center; margin: 0px auto; width: 420px; height: 10px; z-index: -10; } /* Mobile adjustments */ @media only screen and (max-width: 600px) { .err-code { font-size: 150px; height: 150px; margin-left: 1vw; } .txt-primary { font-size: 2.5em; } .back-btn, hr { width: 100%; } .git-logo { width: 100%; } } /* Mobile adjustments */ @media only screen and (max-width: 300px) { .err-code { font-size: 120px; margin-top: -30px; } } ================================================ FILE: 
html/assets/css/page/footer.css ================================================ /* Page footer layout. Fix: the original rule set "width: 50%" and then "width: 100%" in the same declaration block — the first was dead (overridden by order of appearance) and has been removed. */ footer { margin: auto; padding-top: 50px; padding-bottom: 15px; max-width: 1150px; width: 100%; height: -webkit-max-content; height: -moz-max-content; height: max-content; } .ref-row { display: flex; place-content: center; gap: 2em; } .ref-row > .discordSvg, .ref-row > .githubSvg , .ref-row .donationSvg { background-color: var(--searchTextColor); } :root.dark .ref-row > .discordSvg, :root.dark .ref-row > .githubSvg, :root.dark .ref-row .donationSvg { background-color: var(--tagColor); } .ref-row > .donation { position: relative; } .ref-row > .donation > .tooltip { position: absolute; visibility: hidden; width: max-content; max-width: 33vw; text-align: center; border-radius: 6px; padding: 0.5em; z-index: 1; color: #fff; background-color: var(--bgPrimaryColor); box-shadow: 0 0 10px 8px var(--backgroundShadow); } .ref-row > .donation:hover .tooltip { visibility: visible; } .footer-hr { border-top: 1px solid var(--primaryColor); padding-bottom: 10px; } .footer-hr:before { content: ''; border-radius: 100%; position: absolute; height: 20px; width: 120px; background: var(--background); margin: -10px; /* left: calc(50% - 50px); */ margin-left: -60px; box-shadow: inherit } .footer-hr:after { content: ''; border-radius: 100%; position: absolute; height: 10px; width: 10px; background: var(--bgPrimaryColor); margin: -5px; box-shadow: inherit } ================================================ FILE: html/assets/css/page/helpPage.css ================================================ .help-joto { width: 100px; position: relative; margin-top: -320px; margin-left: 530px; } div > .fat:not(:first-child) { margin-top: 1rem; } article .fat { margin-top: 1rem; } ================================================ FILE: html/assets/css/page/indexPage.css ================================================ /* ----------------- Index Page ----------------- */ .title { padding-top: 5%; padding-bottom:
1.25%; margin: auto; width: 30%; } .titleImg { width: 30vw; } form { -webkit-margin-after: 1em; margin-block-end: 1em; } #searchDiv { max-width: 1000px; } .overlay { margin-top: -14px; } .overlay.suggestion { width: 81%; margin-left: 17.5%; } .x-button { margin-right: 5px; } .index-btn-container { display: flex; place-content: center; } .settingsBtn, .infoBtn, .notificationBtn { position: absolute !important; cursor: pointer; } .settingsBtn { top: 20px; left: 20px; } .notificationBtn { top: 29px; right: 25px; } .notificationPoint { display: none; position: absolute; pointer-events: none; top: 32px; right: 30px; padding: 0.25rem; border-radius: 2rem; z-index: 500; width: 9px; height: 9px; background-color: var(--alert); } .notificationBtn.update ~ .notificationPoint { display: block; } .infoBtn { top: 29px; right: 60px; } .notificationBtn.update, .infoBtn.new { background-color: var(--primaryColor); } .inner-form { border-radius: 20px !important; } .input-field.third-wrap { width: 120px; height: 45px; margin: 11px 4px; } .input-field.third-wrap.rad { width: 180px; height: 45px; margin: 11px 4px; } .btn-search { color: white !important; } .input-field.third-wrap.rad > .btn-search { background: var(--secondaryColor); } .rad-picker-icon { color: white; font-size: 22px; margin-left: 5px; } .searchDivInner form .inner-form .input-field input { width: 96%; } .searchDivInner form .inner-form .input-field.first-wrap { display: unset !important; } /* Mobile View */ @media only screen and (max-width: 600px) { @-webkit-keyframes dropdownAnim { from {height: 0px;} to { height: 360%;} } @keyframes dropdownAnim { from {height: 0px;} to { height: 360%;} } .title { padding-top: 15%; width: 72%; } .titleImg { width: 70vw; } .choices.main[data-type*="select-one"]:after { border-color: var(--secondaryTextColor) transparent transparent transparent; } .choices.main[data-type*="select-one"].is-open:after { border-color: transparent transparent var(--secondaryTextColor) transparent 
!important; } .inner-form > .index-btn-container { position: absolute; top: 75px; width: 50%; place-content: flex-start; z-index: -1; } .choices__item.index { color: white !important; } .searchDivInner form .inner-form .input-field.first-wrap .choices__inner .choices__list--single .choices__item { display: unset !important; margin-top: 4px; } .input-field.first-wrap { background: var(--bgPrimaryColor); border-radius: 3px; height: 45px !important; width: 100% !important; margin-left: 10px; } .overlay { z-index: 1; } .overlay.suggestion { width: 100%; margin-left: 0%; } #suggestion-container { margin: 5px -5px; } .rad-picker { max-height: 44vh; } #settingsModal { display: none; } main > .index-btn-container { flex-direction: row-reverse; place-content: flex-start; } .settingsBtn { display: unset !important; left: 10px !important; } .btn-container { display: block !important; } .infoBtn, .notificationBtn { display: unset !important; } .infoBtn { top: 20px; right: 50px; } .notificationBtn { top: 20px; right: 15px; } .notificationPoint { top: 20px; right: 15px; } .input-field.third-wrap:not(.rad) { display: none; } .input-field.third-wrap.rad { width: -webkit-max-content; width: -moz-max-content; width: max-content; margin-top: 9px; padding-right: 15px; } .btn-search { border-radius: 3px; padding-left: 10px; padding-right: 10px; } .mobile-nav-btn { display: none; } footer > div > span { font-size: 15px; } } ================================================ FILE: html/assets/css/page/infoPage.css ================================================ .help-cat { padding-top: 25px; } .table { display: flex; flex-direction: column; margin-left: 25px; } .row { display: flex; flex-direction: row; } .row span:first-child { color: var(--primaryColor); flex: 0 0 150px; } article { max-width: 700px; } ================================================ FILE: html/assets/css/page/kanjiPage.css ================================================ .kanji-entry.left.detail { width: 155px; 
padding-left: 25px; } .kanji-entry.right.detail { overflow-x: hidden; padding-left: 35px; width: 100%; max-width: 1000px; } .kanji-preview.x-large { cursor: pointer; font-size: 100px; } .kanji-preview-info { padding-left: 5px; } .kanji-preview-right { max-width: 350px; text-align: right; } .translation.big { font-size: 25px; } .kanji-preview-left { width: 63%; padding-right: 10%; } .kun-reading { padding-left: 10px; } .kun-reading, .on-reading { width: 100%; } .speed-tag { gap: 1em; align-items: center; } .kanji-img { position: absolute; pointer-events: none; margin-bottom: -130px; opacity: 0; } .animation-container { padding: 1em 0 1.5em 0 !important; place-content: center; } .animation-controller { margin: 0 1em -1em 0; gap: 0.5em; width: 35%; } .animation-controller .slider { width: 100%; } .animation-group > .l { border-top-left-radius: 15px; border-bottom-left-radius: 15px; } .animation-group > .m { margin-left: 2px; margin-right: 2px; width: 60%; } .animation-group > .m > span { pointer-events: none; } .animation-group > .r { border-top-right-radius: 15px; border-bottom-right-radius: 15px; } .animation-group > button { border: 0px; background: var(--bgPrimaryColor); color: white; width: 20%; height: 2em; line-height: 2; } .reset-btn { width: 24px; position: relative; left: 95px; top: -25px; } .animation-group > button > img { width: 45%; } .compounds-dropdown-parent { width: 97%; } .compounds-dropdown:after { position: absolute; content: ""; height: 0; width: 0; border-style: solid; border-width: 5px; border-color: var(--tagColor) transparent transparent transparent; pointer-events: none; transition: linear 0.2s; right: 0; margin-top: -3px; } .compounds-dropdown.closed:after { border-color: transparent transparent var(--tagColor) transparent; margin-top: -8px; } .compounds-click-area { position: absolute; width: 100%; height: 20px; margin-top: -25px; } .anim-container svg { user-select: none; } .anim-container text { font-size: 8px; } .tree-parent { position: 
relative; cursor: initial; width: 90%; max-width: 1145px; } #tree-toggle { position: absolute; user-select: none; cursor: pointer; right: 1em; top: 1em; height: 26px; width: 26px; zoom: 1.2; } @media only screen and (max-width: 600px) { #tree-toggle { zoom: 1; } } #tree-toggle { background-color: var(--primaryColor) !important; mask: url("/assets/svg/ui/graph_filled.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/graph_filled.svg") no-repeat center; } #tree-toggle.detailed { mask: url("/assets/svg/ui/graph_empty.svg") no-repeat center; -webkit-mask: url("/assets/svg/ui/graph_empty.svg") no-repeat center; } #tree-target { align-items: center; text-align: center; padding: 5%; box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.2), 0 6px 20px 0 rgba(0, 0, 0, 0.19); background-color: var(--overlay); } #tree-target > svg { max-height: 80vh; max-width: 80vw; } #tree-target .link { fill: none; stroke: var(--graphLink); stroke-width: 3px; } #tree-target circle { fill: var(--graphCircle) !important; stroke: var(--graphStroke); stroke-width: 2px; } #tree-target circle.clickable { cursor: pointer; } #tree-target text { fill: var(--graphText); font: 20px sans-serif; } #tree-target path { pointer-events: none; stroke: none; fill-rule: nonzero; fill: var(--graphPath); fill-opacity: 1; transform: translateX(-15px) translateY(-15px) scale(0.03); } /* Kanji dark mode */ :root.dark .stroke-container > svg > path.active { stroke: rgb(211, 207, 201) !important; } :root.dark .stroke-container > svg > path.not.active { stroke: rgb(105, 105, 105) !important; } :root.dark .stroke-container > svg > line { stroke: rgb(92, 92, 92) !important; } :root.dark .stroke-container > svg > circle { fill: rgb(95, 241, 96) !important; opacity: 0.75 !important; } :root.dark .anim-container > svg path:not(.bg) { stroke: var(--primaryTextColor); } :root.dark .anim-container text { fill: var(--primaryTextColor); } /* Everything lower than max size */ @media only screen and (max-width: 1150px) { 
.animation-group > button > img { width: 1.5em; } .animation-controller { width: 50%; } } /* Small screens */ @media only screen and (max-width: 600px) { .compounds-parent { flex-direction: column !important; } .compounds-dropdown:after { right: -8px; } .kun-reading { padding-left: 0px; padding-top: 20px; } .main-container > .d-flex { flex-direction: column; } .kanji-entry.left.detail { width: 100%; align-self: center; padding-left: 0px; } .kanji-preview.x-large { align-self: center; } .translation.big { font-size: 20px; padding-bottom: 10px; } .kanji-preview-info { padding-left: 0px; text-align: left; } .kanji-entry.right.detail { width: unset; padding-left: 0px; } .kanji-entry.right.detail > .kanji-entry { flex-direction: column !important; } .kanji-preview-left { padding: 0; width: 100%; text-align: center; text-align: -webkit-center; } .rad-parts-parent { display: flex !important; flex-direction: row !important; } .notes.stroke { text-align: center; text-align: -webkit-center; } .notes.rad { width: 50%; } .notes.parts { width: 50%; padding-right: 20px; text-align: right; } .kanji-preview-left > .d-flex { padding-left: 10px; } .kanji-preview-right { max-width: unset; padding-top: 10px; text-align: left; } .notes { padding-left: 10px; } .tags.fat { font-size: 15px; } .clickable.fat { font-size: 15px; } .stroke-container { max-width: 99vw; padding-left: 1vw; } .on-reading { padding-top: 10px; } .slider { width: 100px; height: 10px; } .slider::-webkit-slider-thumb { width: 20px; height: 20px; border-radius: 50%; } .slider::-moz-range-thumb { width: 20px; height: 20px; border-radius: 50%; } .slider-output { margin-left: -10px; } } /* Very small screens */ @media only screen and (max-width: 400px) { .animation-group > button > img { width: 1.25em; } } ================================================ FILE: html/assets/css/page/multiPage/kana.css ================================================ .inline-kana-preview { line-height: 1.1; font-size: xx-large; padding-top: 
24px; } ================================================ FILE: html/assets/css/page/multiPage/kanji.css ================================================ .kanji-entry { height: -webkit-fit-content; height: -moz-fit-content; height: fit-content; padding-bottom: 10px; } .kanji-entry.right { position: relative; width: 100%; } .kanji-preview { line-height: 1.1; font-size: xx-large; } .translation { font-size: 19px; } .stroke-container { max-width: 80vw; overflow-x: auto; } .animation-container { padding-left: 30%; padding-top: 10px; } .kanji-entry > d-flex.flex-row { flex-flow: row wrap; } .furigana-kanji-container { text-align: center; text-align: -webkit-center; word-spacing: 10px; } .furigana-preview { margin-bottom: 15px; } .draw2 { stroke-dasharray: 1000; stroke-dashoffset: 1000; -webkit-animation: dash2 10s linear forwards; animation: dash2 10s linear forwards; } .kanjisvg:hover { cursor: pointer; } @-webkit-keyframes dash2 { to { stroke-dashoffset: 0;} } @keyframes dash2 { to { stroke-dashoffset: 0;} } ================================================ FILE: html/assets/css/page/multiPage/markdown.css ================================================ h1 { font-size: 1.5rem; } h2 { font-size: 1rem; } p { font-size: 14px; } .md-center { display: flex; justify-content: center; margin-top: 10px; } ================================================ FILE: html/assets/css/page/namePage.css ================================================ .kanji-preview.small { font-size: x-large; } ================================================ FILE: html/assets/css/page/newsPage.css ================================================ #news-list { display: flex; flex-direction: column; align-items: center; } .news-container { display: block; width: 80%; margin-top: 2rem; border-radius: 1rem; background-color: rgba(249, 249, 249, 0.75); box-shadow: 0px 8px 20px 0px var(--backgroundShadow); } :root.dark .news-container { background-color: rgba(47, 49, 55, 0.75); } .news-head { background-color: 
var(--bgPrimaryColor); background: linear-gradient(45deg,var(--bgPrimaryColor),var(--primaryColor)); color: var(--secondaryTextColor); width: 100%; height: 50px; border-top-left-radius: 1rem; border-top-right-radius: 1rem; text-align: center; font-size: 25px; } .news-head > span { position: relative; top: 5px; } .news-date { position: relative; text-align: right; margin: 10px 10px -30px; color: var(--tagColor); } @media only screen and (max-width: 600px) { .news-date { margin: 5px 10px -20px; } } .news-body { padding: 1.125rem; } ================================================ FILE: html/assets/css/page/sentencePage.css ================================================ .furigana-kanji-container { text-align: left; } .inline-kana-preview.small, .kanji-preview.small, .inline-kana-preview.small { font-size: x-large; } .sentence-translation.original { font-size: 19px; padding-top: 0.2rem; } .sentence-toggle { width: 100%; margin-top: -10px; margin-bottom: -10px; color: var(--primaryColor); font-family: "Roboto"; text-align: end; font-size: small; cursor: pointer; } .sentence-share { display: flex; flex-direction: column; gap: 0.5em; width: 100%; font-size: 12px; margin-top: -10px; margin-bottom: -12px; padding-top: 5px; padding-right: 5px; color: var(--primaryColor); -webkit-writing-mode: tb; -ms-writing-mode: tb; writing-mode: tb; } .sentence-share > .searchSvg { cursor: pointer; color: var(--disabledColor); background-color: var(--disabledColor); } ================================================ FILE: html/assets/css/page/wordExtensions/searchAnnotation.css ================================================ .search-annotation { color: var(--tagColor); max-height: 150px; margin-bottom: 15px; overflow: auto; display: flex; justify-content: center; } .search-annotation .kanji-preview { line-height: unset; } .search-annotation.no-center { justify-content: unset; } .search-annotation::-webkit-scrollbar { width: 0px; } .search-inflection { padding: 0.5rem 1rem 0.5rem 1rem; 
} .search-inflection > span > .forms { position: relative; left: 5px; top: 5px; } /* Mobile only */ @media only screen and (max-width: 600px) { .search-annotation { margin: -15px 0 0 5vw; } .search-inflection { padding: 0.5rem 0rem 0.5rem 0rem; width: 80vw; } } ================================================ FILE: html/assets/css/page/wordExtensions/sentenceReader.css ================================================ #sr { justify-content: center; } .sentence-part { color: var(--primaryTextColor) !important; border-bottom: 2px solid var(--lineColor); line-height: 1.1; margin-right: 15px; margin-top: 9px; flex-grow: 0; flex-shrink: 0; overflow: hidden; cursor: pointer; } .sentence-part.selected { border-bottom: 2px solid var(--primaryColor); } .sentence-part:hover { text-decoration: unset; text-shadow: -1px -1px var(--primaryTextColor); color: var(--primaryTextColor) !important; } .sentence-part.inline-kana-preview { margin-top: 19px; } .sentence-part.symbol { border-bottom: 0px; margin-right: 0px; margin-left: 0px; } .sentence-part:not(.symbol) + .symbol { margin-left: -15px; margin-right: 10px; } .sentence-part.particle { color: var(--primaryColor) !important; } .sentence-part.particle:hover { text-shadow: -1px -1px var(--primaryColor); } .sentence-part:empty { padding-top: 19px; } .sentence-part > .furigana-kanji-container { margin-top: 24px; } .sentence-part > .inline-kana-preview { margin-top: 9px; } @media only screen and (max-width: 600px) { #sr { scrollbar-width: none; flex-wrap: nowrap; } #sr::-webkit-scrollbar { width: 0px; } .sentence-part { min-width: -webkit-max-content; min-width: -moz-max-content; min-width: max-content; margin-right: 20px; } } ================================================ FILE: html/assets/css/page/wordPage.css ================================================ .title-div { text-align: center; text-align: -webkit-center; width: 80%; } .title-div h1 { font-weight: bold; font-size: xx-large; } .title-div h4 { font-size: medium; } 
.main-tab-select { padding-top: 25px; width: 100%; } .main-tab-select.l { padding-right: 10px; display: flex; justify-content: flex-start; } .main-tab-select.r { padding-left: 10px; display: flex; justify-content: flex-end; } .main-tab-select h2 { font-size: medium; text-decoration: underline; color: var(--primaryColor); } .tab-btn { cursor: pointer; width: -webkit-max-content; width: -moz-max-content; width: max-content; } .entry-min-height-1 { min-height: 90px; } .entry-min-height-2 { min-height: 180px; } .main-info > .d-flex.flex-row { padding-left: 1vw; } .definition-wrapper { max-width: 93%; } /* Example Sentences */ .example-sentence { padding-top: 5px; margin-bottom: 5px !important; max-width: 90%; overflow-y: hidden; } .example-sentence.collapsed { max-height: 35px; } .example-sentence .wrap div { margin-top: -5px; } .expander { height: 0; width: 0; margin-top: 24px; margin-left: 5px; border-style: solid; border-width: 5px; border-color: var(--tagColor) transparent transparent transparent; } .expander.on { transform: rotate(180deg); transform-origin: top center; margin-top: 29px; } .expander:hover { cursor: pointer; } .tags .kanji-preview { font-size: 15px; } .furigana-preview { font-size: inherit; } .tags .furigana-preview { font-size: 10px; margin-bottom: 5px; margin-top: 2px; } .tags .inline-kana-preview { font-size: 15px; padding-top: 18px; } /* Kanji Stuff Overwrite */ .kanji-entry.left.fixed { min-width: 155px; max-width: 155px; justify-content: center; align-items: center; } .kanji-preview.large { font-size: 50px; padding-top: 15px; } .kanji-preview.large:hover { text-decoration: none; } .kanji-entry { padding-bottom: 5px; } /* 3-dot Menu */ .dot-menu { width: 100%; } .mdl-menu__item[disabled] + .mdl-menu__item[disabled], .mdl-menu__item:last-child[disabled], .mdl-menu__item:first-child[disabled] { display: none; } .mdl-menu__item[disabled] { height: 10px; } .mdl-menu__item[disabled] > hr { margin: 0px; margin-top: 5px; } #info-dropdown { 
background-color: var(--background); width: -webkit-max-content; width: -moz-max-content; width: max-content; cursor: pointer; } .info-entry { padding-left: 5px; padding-right: 5px; font-size: 18px; line-height: 1.4; cursor: pointer; text-align: center; display: flex; flex-direction: row; align-items: center; width: auto; } .info-entry:hover { background-color: var(--lineColor); } .info-entry > * { margin: 0 5px; } .info-entry > div { margin-top: 1px; } .info-entry a { vertical-align: middle; } .info-entry > .extra { border-left: 1px solid var(--lineColor); padding-left: 7px; margin-left: 1px; } .info-entry .copySvg { pointer-events: all; } .word-tooltip { position: absolute; cursor: pointer; top: 0px; right: 10px; } .word-tooltip > span { margin-bottom: -20px; } .mdl-menu__container { margin-right: 10px; } /* -------- Words Column -------- */ .word-frequency { margin-top: 10px; width: 100px; clear: right; margin: 4px 0 8px 0; padding: 2px 5px 3px 5px; font-size: 10px; -webkit-font-smoothing: antialiased; background-color: var(--secondaryColor); border-radius: 3px; color: var(--secondaryTextColor); font-weight: bold; text-align: center; text-align: -webkit-center; } .word-frequency.common { background-color: var(--bgPrimaryColor); } .kanji-entry .list-entry + .list-entry { padding-top: 5px; } /* Pitch Accent Borders */ .pitch { border-radius: 0px; margin-right: -4px; font-size: large; color: var(--primaryColor); } .pitch.t { border-top: 1px solid var(--tagColor); padding-left: 5px; padding-right: 5px; } .pitch.r { border-right: 1px solid var(--tagColor); margin-right: -6px; } .pitch.b { border-bottom: 1px solid var(--tagColor); } .pitch.b:not(.r) { padding-left: 3px; } /* Info Overlay */ .table.conjugation, .table.collocation { width: 80%; margin-left: 10%; } .table.collocation tr:first-child > th { border-top: 2px solid var(--lineColor) !important; } .table.collocation th, .table.collocation td { width: 50%; } .table { color: var(--primaryTextColor) !important; } 
thead > tr > th { color: var(--secondaryTextColor) !important; background-color: var(--bgPrimaryColor) !important; } table th { padding: 0.75rem; border-color: var(--searchTextColor) !important; border-bottom: 2px solid var(--lineColor) !important; border-top: 1px solid var(--lineColor) !important; } td { border-top: 2px solid var(--lineColor) !important; } table tr:last-child > td { border-bottom: 2px solid var(--lineColor) !important; } /* -------- Kanji Column -------- */ .translation.kanji { padding-top: 10px; padding-left: 10px; } ================================================ FILE: html/assets/css/search/choices.css ================================================ /* Used by the search bar only. So much css for a damn text field.. */ .choices { position: relative; margin-bottom: 24px; font-size: 16px; } .choices:focus { outline: none; } .choices:last-child { margin-bottom: 0; } .choices__item--choice:hover { color: var(--primaryColor) !important; } .choices[data-type*="select-one"] { cursor: pointer; } .choices[data-type*="select-one"] .choices__inner { padding-bottom: 7.5px; } .choices[data-type*="select-one"]:after { content: ""; height: 0; width: 0; border-style: solid; border-width: 5px; border-color: var(--tagColor) transparent transparent transparent; position: absolute; right: 30px; top: 50%; margin-top: -2.5px; pointer-events: none; } .choices[data-type*="select-one"].is-open:after { border-color: transparent transparent var(--tagColor) transparent !important; margin-top: -7.5px; } .choices__inner { display: inline-block; vertical-align: top; width: 100%; padding: 7.5px 7.5px 3.75px; border-radius: 2.5px; font-size: 14px; min-height: 44px; overflow: hidden; } .is-open .choices__inner { border-radius: 2.5px 2.5px 0 0; } .choices__list { margin: 0; padding-left: 0; list-style: none; } .choices__list--single { display: inline-block; padding: 4px 16px 4px 4px; width: 100%; } .choices__list--single .choices__item { width: 100%; } .choices__list--dropdown 
{ display: none; z-index: 1; position: absolute; width: 100%; background-color: var(--searchBackground); border: 1px solid var(--searchBackground); top: 100%; margin-top: -1px; border-bottom-left-radius: 2.5px; border-bottom-right-radius: 2.5px; overflow: hidden; word-break: break-all; } .choices__list--dropdown.is-active { display: block; } .choices__list--dropdown .choices__list { position: relative; max-height: 300px; overflow: auto; overflow: -moz-hidden-unscrollable; -webkit-overflow-scrolling: touch; will-change: scroll-position; } .choices__list--dropdown .choices__item { position: relative; padding: 10px; font-size: 14px; } @media (min-width: 640px) { .choices__list--dropdown .choices__item--selectable { padding-right: 100px; } .choices__list--dropdown .choices__item--selectable:after { content: attr(data-select-text); font-size: 12px; opacity: 0; position: absolute; right: 10px; top: 50%; transform: translateY(-50%); } } .choices__item { cursor: default; } .choices__item--selectable { cursor: pointer; } .choices__item--disabled { cursor: not-allowed; -webkit-user-select: none; -ms-user-select: none; -moz-user-select: none; user-select: none; opacity: 0.5; } .choices__input { display: inline-block; vertical-align: baseline; font-size: 14px; margin-bottom: 5px; border: 0; border-radius: 0; max-width: 100%; padding: 4px 0 4px 2px; } .choices__input:focus { outline: 0; } .choices__button:focus { outline: none; } /* ----------------- Settings specific changes -------------- */ .modal-body .choices { margin-bottom: 0px; } /* Vendor-prefixed values first, unprefixed standard keyword last, so conforming browsers resolve to the standard value (matches the max-content fallback pattern used elsewhere in this file). */ .modal-body .choices__inner { width: -webkit-fit-content; width: -moz-fit-content; width: fit-content; } .modal-body .choices__list.choices__list--single { box-shadow: 0px 1px 2px 0px var(--backgroundShadow); border: 1px solid var(--backgroundShadow); padding-right: 20px; } .modal-body .choices:after { right: 15px; margin-left: unset; } .modal-body .choices__list.choices__list--dropdown { width: -webkit-max-content; width: -moz-max-content;
width: max-content; border: 0; margin-top: 2px; border-radius: 4px; box-shadow: 0px 8px 20px 0px var(--backgroundShadow); } .modal-body .choices__list--dropdown .choices__item { padding: 6px 17px 10px 10px; } @media (min-width: 640px) { .modal-body .choices__list--dropdown .choices__item--selectable { width: -webkit-max-content; width: -moz-max-content; width: max-content; padding-right: 10px; } .modal-body .choices ::-webkit-scrollbar { width: 10px; } .modal-body .choices ::-webkit-scrollbar-track { background: var(--lineColor); } .modal-body .choices ::-webkit-scrollbar-thumb { background: var(--itemBG_075); } .search-lang-txt { position: absolute; margin-top: -30px; right: 8rem; } } /* ----------------- Search Bar specific changes -------------- */ .searchDivInner form .inner-form { background: var(--searchBackground); display: flex; width: 100%; justify-content: space-between; align-items: center; box-shadow: 0px 8px 20px 0px var(--backgroundShadow); border-radius: 20px; } .searchDivInner form .inner-form .input-field { height: 68px; } .searchDivInner form .inner-form .input-field input { height: 100%; width: 100%; background: transparent; border: 0; display: block; padding: 10px 32px; margin-right: 80px; font-size: 16px; color: var(--searchTextColor); } .searchDivInner form .inner-form .input-field input.placeholder { color: var(--tagColor); font-size: 16px; } .searchDivInner form .inner-form .input-field input:-moz-placeholder { color: var(--tagColor); font-size: 16px; } .searchDivInner form .inner-form .input-field input::-webkit-input-placeholder { color: var(--tagColor); font-size: 16px; } .searchDivInner form .inner-form .input-field.first-wrap { width: 200px; border-right: 1px solid var(--lineColor); } .searchDivInner form .inner-form .input-field.first-wrap .choices__inner { background: transparent; border-radius: 0; border: 0; height: 100%; display: flex; align-items: center; padding: 10px 30px; } .searchDivInner form .inner-form 
.input-field.first-wrap .choices__inner .choices__list.choices__list--single { display: flex; padding: 0; align-items: center; height: 100%; } .searchDivInner form .inner-form .input-field.first-wrap .choices__inner .choices__item.choices__item--selectable.choices__placeholder { display: flex; align-items: center; height: 100%; opacity: 1; color: var(--tagColor); } .searchDivInner form .inner-form .input-field.first-wrap .choices__inner .choices__list--single .choices__item { display: flex; align-items: center; height: 100%; color: var(--searchTextColor); } .searchDivInner form .inner-form .input-field input:hover, .searchDivInner form .inner-form .input-field input:focus { box-shadow: none; outline: 0; } .searchDivInner form .inner-form .input-field.first-wrap .choices__list.choices__list--dropdown { border: 0; margin-top: 2px; border-radius: 4px; box-shadow: 0px 8px 20px 0px var(--backgroundShadow); } .searchDivInner form .inner-form .input-field.first-wrap .choices__list.choices__list--dropdown .choices__item--selectable { padding-right: 0; } .searchDivInner form .inner-form .input-field.first-wrap .choices__list--dropdown .choices__item { color: var(--searchTextColor); min-height: 24px; } .searchDivInner form .inner-form .input-field.second-wrap { flex-grow: 1; } .searchDivInner form .inner-form .input-field.third-wrap { /* width: 74px; */ width: 30px; } .btn-search { height: 100%; width: 100%; white-space: nowrap; border: 0; cursor: pointer; color: var(--searchBackground); background: var(--bgPrimaryColor); transition: all .2s ease-out, color .2s ease-out; } .btn-search svg { width: 16px; } .btn-search:hover { background: var(--primaryColor); } .btn-search:focus { outline: 0; box-shadow: none; } .searchDivInner form .inner-form .input-field .input-select { height: 100%; } .searchDivInner form .inner-form .input-field .input-select .choices { height: 100%; } .searchDivInner form .inner-form .input-field { height: 50px; } .btn-search > svg > path { fill: 
var(--secondaryTextColor); } .searchDivInner .choices__list--dropdown > .choices__list { margin-left: 10px; } .choices__list.choices__list--dropdown.index.is-active { -webkit-animation: dropdownAnim 0.2s linear forwards; animation: dropdownAnim 0.2s linear forwards; } .choices__list.choices__list--dropdown.index.animate:not(.is-active) { display: unset !important; -webkit-animation: dropdownAnimClose 0.2s linear forwards; animation: dropdownAnimClose 0.2s linear forwards; } @-webkit-keyframes dropdownAnim { from {height: 0px;} to { height: 330%;} } @keyframes dropdownAnim { from {height: 0px;} to { height: 330%;} } /* Fix: "display: hidden" is not a valid display value and was silently dropped by browsers; "none" is the intended hide-after-close value. */ @-webkit-keyframes dropdownAnimClose { from {height: 330%; } to { height: 0%; display: none !important;} } @keyframes dropdownAnimClose { from {height: 330%; } to { height: 0%; display: none !important;} } .choices.main[data-type*="select-one"]:after { transition: linear 0.2s; } .choices.main[data-type*="select-one"].is-open:after { transition: linear 0.2s; } /* Mobile Only */ @media only screen and (max-width: 600px) { .choices[data-type*="select-one"]:after { right: 14px; } } /* Desktop Only */ @media only screen and (min-width: 600px) { .searchDivInner form .inner-form .input-field.first-wrap .choices__list.choices__list--dropdown { border-radius: 20px !important; border-top-right-radius: 0px !important; } } ================================================ FILE: html/assets/css/search/searchRow.css ================================================ #search-row { padding-top: 10px; padding-left: 10px; padding-right: 10px; } #emptyInput { position: absolute; height: 30px; margin-top: 7px; right: 70px; } #emptyInput:focus { outline: none; box-shadow: none; } #emptyInput > svg { width: 20px; fill: var(--tagColor); } #search-vl { position: absolute; border-left: 1px solid var(--lineColor); height: 70%; margin-top: 7px; right: 75px; } .search-embedded-btn:focus { outline: none; box-shadow: none; } .search-embedded-btn { background: unset; position:
absolute; height: 30px; margin-top: 11px; right: 10px; width: unset; border: none; } .search-embedded-btn.search { width: 30px; right: 14px; display: none; } #voiceBtn.search-embedded-btn { display: none; } .search-embedded-btn.search > svg > path, #voiceBtn.search-embedded-btn > svg > path { fill: var(--primaryColor); } .search-embedded-btn.radical { right: 38px; } .search-embedded-btn.radical > span { font-size: 20px; text-shadow: 0px 0px var(--tagColor); color: var(--tagColor); } .input-group { height: 100%; } svg { fill: var(--searchTextColor); } .kanjisvg > path { stroke: var(--primaryTextColor); } .d-flex.center { justify-content: center; } .btn-container { position: absolute; width: 100%; top: 0; text-align: center; text-align: -webkit-center; } .btn-container div { float: left; width: 30px; } .btn-container.rad { margin-top: -8px; margin-right: -7px; } .settingsBtn, .infoBtn, .homeBtn { position: absolute; cursor: pointer; z-index: 15; } .settingsBtn { top: 22px; left: 20px; padding-top: 12px; padding-bottom: 0px; } .infoBtn { top: 21px; right: 3em; } .homeBtn { font-family: 'Material Icons'; font-size: 27px; top: 16px; right: 0.5em; color: var(--tagColor); } .homeBtn.mobile { position: relative; font-size: 38px; top: 0; left: 0; right: 0; } .rad-picker-icon { font-size: 33px; color: var(--searchTextColor); } .rad-picker-txt { margin-top: -9px; } #searchDiv { z-index: 1; margin-left: 15px; width: 100%; max-width: 1150px; position: relative; } #searchDiv.index { max-width: 1150px; margin-left: 0px; } .d-flex.left { flex-direction: row; justify-content: left; } /* Adjustments for the upper buttons */ @media only screen and (max-width: 1350px) { #searchDiv { width: 90%; padding-right: 30px; } } @media only screen and (max-width: 875px) { #searchDiv { padding-right: 45px; } .homeBtn { right: 0.25em; } .infoBtn { right: 2.5em; } } @media only screen and (max-width: 600px) { #searchDiv { width: 100%; padding-right: 0px; } } 
================================================ FILE: html/assets/css/tools/alerts.css ================================================

/* ----------------- Alerts Color Design ----------------- */

/* Base alert bubble: strip the library's default border/shadow styling. */
.msg-message {
    border: none !important;
    border-radius: 15px !important;
    text-shadow: none !important;
}

/* Warning alerts: semi-transparent grey background and matching border. */
.msg-warning {
    background-color: rgba(195,195,195,0.95) !important;
    border-color: rgba(195,195,195,0.95) !important;
}

.mdl-tooltip {
    font-size: 12px;
}

================================================ FILE: html/assets/css/tools/pagination.css ================================================

/* Pagination bar: centered, pulled up over the bottom padding of the results list. */
.pagination {
    display: flex;
    list-style: none;
    justify-content: center;
    padding: 0px 0 25px 0;
    margin-top: -45px;
}

.pagination-item {
    font-family: 'Roboto', sans-serif;
    display: flex;
    padding-left: 0;
    list-style: none;
    border-radius: .25rem;
    /* Fixed: was `background-color: none;` — `none` is not a valid <color>,
       so the declaration was silently dropped. `transparent` expresses the
       same intent and is the property's initial value (no visual change). */
    background-color: transparent;
}

/* Disabled page buttons: no pointer affordance, muted color, no hover animation. */
.pagination-item.disabled .pagination-circle {
    cursor: unset;
    color: var(--tagColor);
}

.pagination-item.disabled .pagination-circle:not(.active):hover {
    animation: none !important;
    -webkit-animation: none !important;
}

.pagination-circle {
    color: var(--primaryTextColor);
    background: none;
    margin-right: 2px;
    margin-left: 2px;
    line-height: 1.25;
    padding: .5rem .75rem;
    font-size: .9rem;
    border: 0;
    border-radius: 50%;
    outline: 0 !important;
}

/* Animated hover highlight for non-active page buttons. */
.pagination-circle:not(.active):hover {
    -webkit-animation: hoverEffect 0.5s forwards;
    animation: hoverEffect 0.5s forwards;
}

/* Currently selected page: filled circle with drop shadow. */
.pagination-circle.active {
    color: var(--secondaryTextColor);
    background: var(--bgPrimaryColor);
    border-radius: 50%;
    box-shadow: 0 2px 5px 0 rgb(0 0 0 / 16%), 0 2px 10px 0 rgb(0 0 0 / 12%);
}

@-webkit-keyframes hoverEffect {
    to { background-color: var(--lineColor); }
}

@keyframes hoverEffect {
    to { background-color: var(--lineColor); }
}

================================================ FILE: html/assets/css/tools/ripple.css ================================================
.has-ripple{position:relative;overflow:hidden;-webkit-transform:translate3d(0,0,0);-o-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}.ripple-a{display:block;position:absolute;pointer-events:none;border-radius:50%;-webkit-transform:scale(0);-o-transform:scale(0);transform:scale(0);background:#fff;opacity:1}.ripple-animate{-webkit-animation:ripple;-o-animation:ripple;animation:ripple}@-webkit-keyframes ripple{100%{opacity:0;-webkit-transform:scale(2);transform:scale(2)}}@-o-keyframes ripple{100%{opacity:0;-o-transform:scale(2);transform:scale(2)}}@keyframes ripple{100%{opacity:0;transform:scale(2)}} ================================================ FILE: html/assets/docs.html ================================================ ================================================ FILE: html/assets/fonts/fonts.css ================================================ @font-face { font-family: 'Roboto'; font-style: normal; font-weight: 300; font-display: swap; src: url("roboto.woff2") format('woff2'); unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD; } @font-face { font-family: 'Material Icons'; font-style: normal; font-weight: 400; src: url("materialFont.woff2") format('woff2'); } .material-icons { font-family: 'Material Icons'; font-weight: normal; font-style: normal; font-size: 24px; line-height: 1; letter-spacing: normal; text-transform: none; display: inline-block; white-space: nowrap; word-wrap: normal; direction: ltr; -webkit-font-feature-settings: 'liga'; -webkit-font-smoothing: antialiased; } ================================================ FILE: html/assets/js/lib/d3.js ================================================ !function(){function n(n){return n&&(n.ownerDocument||n.document||n).documentElement}function t(n){return n&&(n.ownerDocument&&n.ownerDocument.defaultView||n.document&&n||n.defaultView)}function e(n,t){return 
t>n?-1:n>t?1:n>=t?0:NaN}function r(n){return null===n?NaN:+n}function i(n){return!isNaN(n)}function u(n){return{left:function(t,e,r,i){for(arguments.length<3&&(r=0),arguments.length<4&&(i=t.length);i>r;){var u=r+i>>>1;n(t[u],e)<0?r=u+1:i=u}return r},right:function(t,e,r,i){for(arguments.length<3&&(r=0),arguments.length<4&&(i=t.length);i>r;){var u=r+i>>>1;n(t[u],e)>0?i=u:r=u+1}return r}}}function o(n){return n.length}function a(n){for(var t=1;n*t%1;)t*=10;return t}function l(n,t){for(var e in t)Object.defineProperty(n.prototype,e,{value:t[e],enumerable:!1})}function c(){this._=Object.create(null)}function f(n){return(n+="")===bo||n[0]===_o?_o+n:n}function s(n){return(n+="")[0]===_o?n.slice(1):n}function h(n){return f(n)in this._}function p(n){return(n=f(n))in this._&&delete this._[n]}function g(){var n=[];for(var t in this._)n.push(s(t));return n}function v(){var n=0;for(var t in this._)++n;return n}function d(){for(var n in this._)return!1;return!0}function y(){this._=Object.create(null)}function m(n){return n}function M(n,t,e){return function(){var r=e.apply(t,arguments);return r===t?n:r}}function x(n,t){if(t in n)return t;t=t.charAt(0).toUpperCase()+t.slice(1);for(var e=0,r=wo.length;r>e;++e){var i=wo[e]+t;if(i in n)return i}}function b(){}function _(){}function w(n){function t(){for(var t,r=e,i=-1,u=r.length;++ie;e++)for(var i,u=n[e],o=0,a=u.length;a>o;o++)(i=u[o])&&t(i,o,e);return n}function Z(n){return ko(n,qo),n}function V(n){var t,e;return function(r,i,u){var o,a=n[u].update,l=a.length;for(u!=e&&(e=u,t=0),i>=t&&(t=i+1);!(o=a[t])&&++t0&&(n=n.slice(0,a));var c=To.get(n);return c&&(n=c,l=B),a?t?i:r:t?b:u}function $(n,t){return function(e){var r=ao.event;ao.event=e,t[0]=this.__data__;try{n.apply(this,t)}finally{ao.event=r}}}function B(n,t){var e=$(n,t);return function(n){var t=this,r=n.relatedTarget;r&&(r===t||8&r.compareDocumentPosition(t))||e.call(t,n)}}function W(e){var r=".dragsuppress-"+ 
++Do,i="click"+r,u=ao.select(t(e)).on("touchmove"+r,S).on("dragstart"+r,S).on("selectstart"+r,S);if(null==Ro&&(Ro="onselectstart"in e?!1:x(e.style,"userSelect")),Ro){var o=n(e).style,a=o[Ro];o[Ro]="none"}return function(n){if(u.on(r,null),Ro&&(o[Ro]=a),n){var t=function(){u.on(i,null)};u.on(i,function(){S(),t()},!0),setTimeout(t,0)}}}function J(n,e){e.changedTouches&&(e=e.changedTouches[0]);var r=n.ownerSVGElement||n;if(r.createSVGPoint){var i=r.createSVGPoint();if(0>Po){var u=t(n);if(u.scrollX||u.scrollY){r=ao.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var o=r[0][0].getScreenCTM();Po=!(o.f||o.e),r.remove()}}return Po?(i.x=e.pageX,i.y=e.pageY):(i.x=e.clientX,i.y=e.clientY),i=i.matrixTransform(n.getScreenCTM().inverse()),[i.x,i.y]}var a=n.getBoundingClientRect();return[e.clientX-a.left-n.clientLeft,e.clientY-a.top-n.clientTop]}function G(){return ao.event.changedTouches[0].identifier}function K(n){return n>0?1:0>n?-1:0}function Q(n,t,e){return(t[0]-n[0])*(e[1]-n[1])-(t[1]-n[1])*(e[0]-n[0])}function nn(n){return n>1?0:-1>n?Fo:Math.acos(n)}function tn(n){return n>1?Io:-1>n?-Io:Math.asin(n)}function en(n){return((n=Math.exp(n))-1/n)/2}function rn(n){return((n=Math.exp(n))+1/n)/2}function un(n){return((n=Math.exp(2*n))-1)/(n+1)}function on(n){return(n=Math.sin(n/2))*n}function an(){}function ln(n,t,e){return this instanceof ln?(this.h=+n,this.s=+t,void(this.l=+e)):arguments.length<2?n instanceof ln?new ln(n.h,n.s,n.l):_n(""+n,wn,ln):new ln(n,t,e)}function cn(n,t,e){function r(n){return n>360?n-=360:0>n&&(n+=360),60>n?u+(o-u)*n/60:180>n?o:240>n?u+(o-u)*(240-n)/60:u}function i(n){return Math.round(255*r(n))}var u,o;return n=isNaN(n)?0:(n%=360)<0?n+360:n,t=isNaN(t)?0:0>t?0:t>1?1:t,e=0>e?0:e>1?1:e,o=.5>=e?e*(1+t):e+t-e*t,u=2*e-o,new mn(i(n+120),i(n),i(n-120))}function fn(n,t,e){return this instanceof fn?(this.h=+n,this.c=+t,void(this.l=+e)):arguments.length<2?n instanceof fn?new fn(n.h,n.c,n.l):n 
instanceof hn?gn(n.l,n.a,n.b):gn((n=Sn((n=ao.rgb(n)).r,n.g,n.b)).l,n.a,n.b):new fn(n,t,e)}function sn(n,t,e){return isNaN(n)&&(n=0),isNaN(t)&&(t=0),new hn(e,Math.cos(n*=Yo)*t,Math.sin(n)*t)}function hn(n,t,e){return this instanceof hn?(this.l=+n,this.a=+t,void(this.b=+e)):arguments.length<2?n instanceof hn?new hn(n.l,n.a,n.b):n instanceof fn?sn(n.h,n.c,n.l):Sn((n=mn(n)).r,n.g,n.b):new hn(n,t,e)}function pn(n,t,e){var r=(n+16)/116,i=r+t/500,u=r-e/200;return i=vn(i)*na,r=vn(r)*ta,u=vn(u)*ea,new mn(yn(3.2404542*i-1.5371385*r-.4985314*u),yn(-.969266*i+1.8760108*r+.041556*u),yn(.0556434*i-.2040259*r+1.0572252*u))}function gn(n,t,e){return n>0?new fn(Math.atan2(e,t)*Zo,Math.sqrt(t*t+e*e),n):new fn(NaN,NaN,n)}function vn(n){return n>.206893034?n*n*n:(n-4/29)/7.787037}function dn(n){return n>.008856?Math.pow(n,1/3):7.787037*n+4/29}function yn(n){return Math.round(255*(.00304>=n?12.92*n:1.055*Math.pow(n,1/2.4)-.055))}function mn(n,t,e){return this instanceof mn?(this.r=~~n,this.g=~~t,void(this.b=~~e)):arguments.length<2?n instanceof mn?new mn(n.r,n.g,n.b):_n(""+n,mn,cn):new mn(n,t,e)}function Mn(n){return new mn(n>>16,n>>8&255,255&n)}function xn(n){return Mn(n)+""}function bn(n){return 16>n?"0"+Math.max(0,n).toString(16):Math.min(255,n).toString(16)}function _n(n,t,e){var r,i,u,o=0,a=0,l=0;if(r=/([a-z]+)\((.*)\)/.exec(n=n.toLowerCase()))switch(i=r[2].split(","),r[1]){case"hsl":return e(parseFloat(i[0]),parseFloat(i[1])/100,parseFloat(i[2])/100);case"rgb":return t(Nn(i[0]),Nn(i[1]),Nn(i[2]))}return(u=ua.get(n))?t(u.r,u.g,u.b):(null==n||"#"!==n.charAt(0)||isNaN(u=parseInt(n.slice(1),16))||(4===n.length?(o=(3840&u)>>4,o=o>>4|o,a=240&u,a=a>>4|a,l=15&u,l=l<<4|l):7===n.length&&(o=(16711680&u)>>16,a=(65280&u)>>8,l=255&u)),t(o,a,l))}function wn(n,t,e){var r,i,u=Math.min(n/=255,t/=255,e/=255),o=Math.max(n,t,e),a=o-u,l=(o+u)/2;return a?(i=.5>l?a/(o+u):a/(2-o-u),r=n==o?(t-e)/a+(e>t?6:0):t==o?(e-n)/a+2:(n-t)/a+4,r*=60):(r=NaN,i=l>0&&1>l?0:r),new ln(r,i,l)}function 
Sn(n,t,e){n=kn(n),t=kn(t),e=kn(e);var r=dn((.4124564*n+.3575761*t+.1804375*e)/na),i=dn((.2126729*n+.7151522*t+.072175*e)/ta),u=dn((.0193339*n+.119192*t+.9503041*e)/ea);return hn(116*i-16,500*(r-i),200*(i-u))}function kn(n){return(n/=255)<=.04045?n/12.92:Math.pow((n+.055)/1.055,2.4)}function Nn(n){var t=parseFloat(n);return"%"===n.charAt(n.length-1)?Math.round(2.55*t):t}function En(n){return"function"==typeof n?n:function(){return n}}function An(n){return function(t,e,r){return 2===arguments.length&&"function"==typeof e&&(r=e,e=null),Cn(t,e,n,r)}}function Cn(n,t,e,r){function i(){var n,t=l.status;if(!t&&Ln(l)||t>=200&&300>t||304===t){try{n=e.call(u,l)}catch(r){return void o.error.call(u,r)}o.load.call(u,n)}else o.error.call(u,l)}var u={},o=ao.dispatch("beforesend","progress","load","error"),a={},l=new XMLHttpRequest,c=null;return!this.XDomainRequest||"withCredentials"in l||!/^(http(s)?:)?\/\//.test(n)||(l=new XDomainRequest),"onload"in l?l.onload=l.onerror=i:l.onreadystatechange=function(){l.readyState>3&&i()},l.onprogress=function(n){var t=ao.event;ao.event=n;try{o.progress.call(u,l)}finally{ao.event=t}},u.header=function(n,t){return n=(n+"").toLowerCase(),arguments.length<2?a[n]:(null==t?delete a[n]:a[n]=t+"",u)},u.mimeType=function(n){return arguments.length?(t=null==n?null:n+"",u):t},u.responseType=function(n){return arguments.length?(c=n,u):c},u.response=function(n){return e=n,u},["get","post"].forEach(function(n){u[n]=function(){return u.send.apply(u,[n].concat(co(arguments)))}}),u.send=function(e,r,i){if(2===arguments.length&&"function"==typeof r&&(i=r,r=null),l.open(e,n,!0),null==t||"accept"in a||(a.accept=t+",*/*"),l.setRequestHeader)for(var f in a)l.setRequestHeader(f,a[f]);return null!=t&&l.overrideMimeType&&l.overrideMimeType(t),null!=c&&(l.responseType=c),null!=i&&u.on("error",i).on("load",function(n){i(null,n)}),o.beforesend.call(u,l),l.send(null==r?null:r),u},u.abort=function(){return l.abort(),u},ao.rebind(u,o,"on"),null==r?u:u.get(zn(r))}function 
zn(n){return 1===n.length?function(t,e){n(null==t?e:null)}:n}function Ln(n){var t=n.responseType;return t&&"text"!==t?n.response:n.responseText}function qn(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now());var i=e+t,u={c:n,t:i,n:null};return aa?aa.n=u:oa=u,aa=u,la||(ca=clearTimeout(ca),la=1,fa(Tn)),u}function Tn(){var n=Rn(),t=Dn()-n;t>24?(isFinite(t)&&(clearTimeout(ca),ca=setTimeout(Tn,t)),la=0):(la=1,fa(Tn))}function Rn(){for(var n=Date.now(),t=oa;t;)n>=t.t&&t.c(n-t.t)&&(t.c=null),t=t.n;return n}function Dn(){for(var n,t=oa,e=1/0;t;)t.c?(t.t8?function(n){return n/e}:function(n){return n*e},symbol:n}}function jn(n){var t=n.decimal,e=n.thousands,r=n.grouping,i=n.currency,u=r&&e?function(n,t){for(var i=n.length,u=[],o=0,a=r[0],l=0;i>0&&a>0&&(l+a+1>t&&(a=Math.max(1,t-l)),u.push(n.substring(i-=a,i+a)),!((l+=a+1)>t));)a=r[o=(o+1)%r.length];return u.reverse().join(e)}:m;return function(n){var e=ha.exec(n),r=e[1]||" ",o=e[2]||">",a=e[3]||"-",l=e[4]||"",c=e[5],f=+e[6],s=e[7],h=e[8],p=e[9],g=1,v="",d="",y=!1,m=!0;switch(h&&(h=+h.substring(1)),(c||"0"===r&&"="===o)&&(c=r="0",o="="),p){case"n":s=!0,p="g";break;case"%":g=100,d="%",p="f";break;case"p":g=100,d="%",p="r";break;case"b":case"o":case"x":case"X":"#"===l&&(v="0"+p.toLowerCase());case"c":m=!1;case"d":y=!0,h=0;break;case"s":g=-1,p="r"}"$"===l&&(v=i[0],d=i[1]),"r"!=p||h||(p="g"),null!=h&&("g"==p?h=Math.max(1,Math.min(21,h)):"e"!=p&&"f"!=p||(h=Math.max(0,Math.min(20,h)))),p=pa.get(p)||Fn;var M=c&&s;return function(n){var e=d;if(y&&n%1)return"";var i=0>n||0===n&&0>1/n?(n=-n,"-"):"-"===a?"":a;if(0>g){var l=ao.formatPrefix(n,h);n=l.scale(n),e=l.symbol+d}else n*=g;n=p(n,h);var x,b,_=n.lastIndexOf(".");if(0>_){var w=m?n.lastIndexOf("e"):-1;0>w?(x=n,b=""):(x=n.substring(0,w),b=n.substring(w))}else x=n.substring(0,_),b=t+n.substring(_+1);!c&&s&&(x=u(x,1/0));var S=v.length+x.length+b.length+(M?0:i.length),k=f>S?new Array(S=f-S+1).join(r):"";return 
M&&(x=u(k+x,k.length?f-b.length:1/0)),i+=v,n=x+b,("<"===o?i+n+k:">"===o?k+i+n:"^"===o?k.substring(0,S>>=1)+i+n+k.substring(S):i+(M?n:k+n))+e}}}function Fn(n){return n+""}function Hn(){this._=new Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function On(n,t,e){function r(t){var e=n(t),r=u(e,1);return r-t>t-e?e:r}function i(e){return t(e=n(new va(e-1)),1),e}function u(n,e){return t(n=new va(+n),e),n}function o(n,r,u){var o=i(n),a=[];if(u>1)for(;r>o;)e(o)%u||a.push(new Date(+o)),t(o,1);else for(;r>o;)a.push(new Date(+o)),t(o,1);return a}function a(n,t,e){try{va=Hn;var r=new Hn;return r._=n,o(r,t,e)}finally{va=Date}}n.floor=n,n.round=r,n.ceil=i,n.offset=u,n.range=o;var l=n.utc=In(n);return l.floor=l,l.round=In(r),l.ceil=In(i),l.offset=In(u),l.range=a,n}function In(n){return function(t,e){try{va=Hn;var r=new Hn;return r._=t,n(r,e)._}finally{va=Date}}}function Yn(n){function t(n){function t(t){for(var e,i,u,o=[],a=-1,l=0;++aa;){if(r>=c)return-1;if(i=t.charCodeAt(a++),37===i){if(o=t.charAt(a++),u=C[o in ya?t.charAt(a++):o],!u||(r=u(n,e,r))<0)return-1}else if(i!=e.charCodeAt(r++))return-1}return r}function r(n,t,e){_.lastIndex=0;var r=_.exec(t.slice(e));return r?(n.w=w.get(r[0].toLowerCase()),e+r[0].length):-1}function i(n,t,e){x.lastIndex=0;var r=x.exec(t.slice(e));return r?(n.w=b.get(r[0].toLowerCase()),e+r[0].length):-1}function u(n,t,e){N.lastIndex=0;var r=N.exec(t.slice(e));return r?(n.m=E.get(r[0].toLowerCase()),e+r[0].length):-1}function o(n,t,e){S.lastIndex=0;var r=S.exec(t.slice(e));return r?(n.m=k.get(r[0].toLowerCase()),e+r[0].length):-1}function a(n,t,r){return e(n,A.c.toString(),t,r)}function l(n,t,r){return e(n,A.x.toString(),t,r)}function c(n,t,r){return e(n,A.X.toString(),t,r)}function f(n,t,e){var r=M.get(t.slice(e,e+=2).toLowerCase());return null==r?-1:(n.p=r,e)}var s=n.dateTime,h=n.date,p=n.time,g=n.periods,v=n.days,d=n.shortDays,y=n.months,m=n.shortMonths;t.utc=function(n){function e(n){try{va=Hn;var t=new va;return 
t._=n,r(t)}finally{va=Date}}var r=t(n);return e.parse=function(n){try{va=Hn;var t=r.parse(n);return t&&t._}finally{va=Date}},e.toString=r.toString,e},t.multi=t.utc.multi=ct;var M=ao.map(),x=Vn(v),b=Xn(v),_=Vn(d),w=Xn(d),S=Vn(y),k=Xn(y),N=Vn(m),E=Xn(m);g.forEach(function(n,t){M.set(n.toLowerCase(),t)});var A={a:function(n){return d[n.getDay()]},A:function(n){return v[n.getDay()]},b:function(n){return m[n.getMonth()]},B:function(n){return y[n.getMonth()]},c:t(s),d:function(n,t){return Zn(n.getDate(),t,2)},e:function(n,t){return Zn(n.getDate(),t,2)},H:function(n,t){return Zn(n.getHours(),t,2)},I:function(n,t){return Zn(n.getHours()%12||12,t,2)},j:function(n,t){return Zn(1+ga.dayOfYear(n),t,3)},L:function(n,t){return Zn(n.getMilliseconds(),t,3)},m:function(n,t){return Zn(n.getMonth()+1,t,2)},M:function(n,t){return Zn(n.getMinutes(),t,2)},p:function(n){return g[+(n.getHours()>=12)]},S:function(n,t){return Zn(n.getSeconds(),t,2)},U:function(n,t){return Zn(ga.sundayOfYear(n),t,2)},w:function(n){return n.getDay()},W:function(n,t){return Zn(ga.mondayOfYear(n),t,2)},x:t(h),X:t(p),y:function(n,t){return Zn(n.getFullYear()%100,t,2)},Y:function(n,t){return Zn(n.getFullYear()%1e4,t,4)},Z:at,"%":function(){return"%"}},C={a:r,A:i,b:u,B:o,c:a,d:tt,e:tt,H:rt,I:rt,j:et,L:ot,m:nt,M:it,p:f,S:ut,U:Bn,w:$n,W:Wn,x:l,X:c,y:Gn,Y:Jn,Z:Kn,"%":lt};return t}function Zn(n,t,e){var r=0>n?"-":"",i=(r?-n:n)+"",u=i.length;return r+(e>u?new Array(e-u+1).join(t)+i:i)}function Vn(n){return new RegExp("^(?:"+n.map(ao.requote).join("|")+")","i")}function Xn(n){for(var t=new c,e=-1,r=n.length;++e68?1900:2e3)}function nt(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e,e+2));return r?(n.m=r[0]-1,e+r[0].length):-1}function tt(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e,e+2));return r?(n.d=+r[0],e+r[0].length):-1}function et(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e,e+3));return r?(n.j=+r[0],e+r[0].length):-1}function rt(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e,e+2));return 
r?(n.H=+r[0],e+r[0].length):-1}function it(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e,e+2));return r?(n.M=+r[0],e+r[0].length):-1}function ut(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e,e+2));return r?(n.S=+r[0],e+r[0].length):-1}function ot(n,t,e){ma.lastIndex=0;var r=ma.exec(t.slice(e,e+3));return r?(n.L=+r[0],e+r[0].length):-1}function at(n){var t=n.getTimezoneOffset(),e=t>0?"-":"+",r=xo(t)/60|0,i=xo(t)%60;return e+Zn(r,"0",2)+Zn(i,"0",2)}function lt(n,t,e){Ma.lastIndex=0;var r=Ma.exec(t.slice(e,e+1));return r?e+r[0].length:-1}function ct(n){for(var t=n.length,e=-1;++e=0?1:-1,a=o*e,l=Math.cos(t),c=Math.sin(t),f=u*c,s=i*l+f*Math.cos(a),h=f*o*Math.sin(a);ka.add(Math.atan2(h,s)),r=n,i=l,u=c}var t,e,r,i,u;Na.point=function(o,a){Na.point=n,r=(t=o)*Yo,i=Math.cos(a=(e=a)*Yo/2+Fo/4),u=Math.sin(a)},Na.lineEnd=function(){n(t,e)}}function dt(n){var t=n[0],e=n[1],r=Math.cos(e);return[r*Math.cos(t),r*Math.sin(t),Math.sin(e)]}function yt(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]}function mt(n,t){return[n[1]*t[2]-n[2]*t[1],n[2]*t[0]-n[0]*t[2],n[0]*t[1]-n[1]*t[0]]}function Mt(n,t){n[0]+=t[0],n[1]+=t[1],n[2]+=t[2]}function xt(n,t){return[n[0]*t,n[1]*t,n[2]*t]}function bt(n){var t=Math.sqrt(n[0]*n[0]+n[1]*n[1]+n[2]*n[2]);n[0]/=t,n[1]/=t,n[2]/=t}function _t(n){return[Math.atan2(n[1],n[0]),tn(n[2])]}function wt(n,t){return xo(n[0]-t[0])a;++a)i.point((e=n[a])[0],e[1]);return void i.lineEnd()}var l=new Tt(e,n,null,!0),c=new Tt(e,null,l,!1);l.o=c,u.push(l),o.push(c),l=new Tt(r,n,null,!1),c=new Tt(r,null,l,!0),l.o=c,u.push(l),o.push(c)}}),o.sort(t),qt(u),qt(o),u.length){for(var a=0,l=e,c=o.length;c>a;++a)o[a].e=l=!l;for(var f,s,h=u[0];;){for(var p=h,g=!0;p.v;)if((p=p.n)===h)return;f=p.z,i.lineStart();do{if(p.v=p.o.v=!0,p.e){if(g)for(var a=0,c=f.length;c>a;++a)i.point((s=f[a])[0],s[1]);else r(p.x,p.n.x,1,i);p=p.n}else{if(g){f=p.p.z;for(var a=f.length-1;a>=0;--a)i.point((s=f[a])[0],s[1])}else r(p.x,p.p.x,-1,i);p=p.p}p=p.o,f=p.z,g=!g}while(!p.v);i.lineEnd()}}}function 
qt(n){if(t=n.length){for(var t,e,r=0,i=n[0];++r0){for(b||(u.polygonStart(),b=!0),u.lineStart();++o1&&2&t&&e.push(e.pop().concat(e.shift())),p.push(e.filter(Dt))}var p,g,v,d=t(u),y=i.invert(r[0],r[1]),m={point:o,lineStart:l,lineEnd:c,polygonStart:function(){m.point=f,m.lineStart=s,m.lineEnd=h,p=[],g=[]},polygonEnd:function(){m.point=o,m.lineStart=l,m.lineEnd=c,p=ao.merge(p);var n=Ot(y,g);p.length?(b||(u.polygonStart(),b=!0),Lt(p,Ut,n,e,u)):n&&(b||(u.polygonStart(),b=!0),u.lineStart(),e(null,null,1,u),u.lineEnd()),b&&(u.polygonEnd(),b=!1),p=g=null},sphere:function(){u.polygonStart(),u.lineStart(),e(null,null,1,u),u.lineEnd(),u.polygonEnd()}},M=Pt(),x=t(M),b=!1;return m}}function Dt(n){return n.length>1}function Pt(){var n,t=[];return{lineStart:function(){t.push(n=[])},point:function(t,e){n.push([t,e])},lineEnd:b,buffer:function(){var e=t;return t=[],n=null,e},rejoin:function(){t.length>1&&t.push(t.pop().concat(t.shift()))}}}function Ut(n,t){return((n=n.x)[0]<0?n[1]-Io-Uo:Io-n[1])-((t=t.x)[0]<0?t[1]-Io-Uo:Io-t[1])}function jt(n){var t,e=NaN,r=NaN,i=NaN;return{lineStart:function(){n.lineStart(),t=1},point:function(u,o){var a=u>0?Fo:-Fo,l=xo(u-e);xo(l-Fo)0?Io:-Io),n.point(i,r),n.lineEnd(),n.lineStart(),n.point(a,r),n.point(u,r),t=0):i!==a&&l>=Fo&&(xo(e-i)Uo?Math.atan((Math.sin(t)*(u=Math.cos(r))*Math.sin(e)-Math.sin(r)*(i=Math.cos(t))*Math.sin(n))/(i*u*o)):(t+r)/2}function Ht(n,t,e,r){var i;if(null==n)i=e*Io,r.point(-Fo,i),r.point(0,i),r.point(Fo,i),r.point(Fo,0),r.point(Fo,-i),r.point(0,-i),r.point(-Fo,-i),r.point(-Fo,0),r.point(-Fo,i);else if(xo(n[0]-t[0])>Uo){var u=n[0]a;++a){var c=t[a],f=c.length;if(f)for(var s=c[0],h=s[0],p=s[1]/2+Fo/4,g=Math.sin(p),v=Math.cos(p),d=1;;){d===f&&(d=0),n=c[d];var y=n[0],m=n[1]/2+Fo/4,M=Math.sin(m),x=Math.cos(m),b=y-h,_=b>=0?1:-1,w=_*b,S=w>Fo,k=g*M;if(ka.add(Math.atan2(k*_*Math.sin(w),v*x+k*Math.cos(w))),u+=S?b+_*Ho:b,S^h>=e^y>=e){var N=mt(dt(s),dt(n));bt(N);var E=mt(i,N);bt(E);var 
A=(S^b>=0?-1:1)*tn(E[2]);(r>A||r===A&&(N[0]||N[1]))&&(o+=S^b>=0?1:-1)}if(!d++)break;h=y,g=M,v=x,s=n}}return(-Uo>u||Uo>u&&-Uo>ka)^1&o}function It(n){function t(n,t){return Math.cos(n)*Math.cos(t)>u}function e(n){var e,u,l,c,f;return{lineStart:function(){c=l=!1,f=1},point:function(s,h){var p,g=[s,h],v=t(s,h),d=o?v?0:i(s,h):v?i(s+(0>s?Fo:-Fo),h):0;if(!e&&(c=l=v)&&n.lineStart(),v!==l&&(p=r(e,g),(wt(e,p)||wt(g,p))&&(g[0]+=Uo,g[1]+=Uo,v=t(g[0],g[1]))),v!==l)f=0,v?(n.lineStart(),p=r(g,e),n.point(p[0],p[1])):(p=r(e,g),n.point(p[0],p[1]),n.lineEnd()),e=p;else if(a&&e&&o^v){var y;d&u||!(y=r(g,e,!0))||(f=0,o?(n.lineStart(),n.point(y[0][0],y[0][1]),n.point(y[1][0],y[1][1]),n.lineEnd()):(n.point(y[1][0],y[1][1]),n.lineEnd(),n.lineStart(),n.point(y[0][0],y[0][1])))}!v||e&&wt(e,g)||n.point(g[0],g[1]),e=g,l=v,u=d},lineEnd:function(){l&&n.lineEnd(),e=null},clean:function(){return f|(c&&l)<<1}}}function r(n,t,e){var r=dt(n),i=dt(t),o=[1,0,0],a=mt(r,i),l=yt(a,a),c=a[0],f=l-c*c;if(!f)return!e&&n;var s=u*l/f,h=-u*c/f,p=mt(o,a),g=xt(o,s),v=xt(a,h);Mt(g,v);var d=p,y=yt(g,d),m=yt(d,d),M=y*y-m*(yt(g,g)-1);if(!(0>M)){var x=Math.sqrt(M),b=xt(d,(-y-x)/m);if(Mt(b,g),b=_t(b),!e)return b;var _,w=n[0],S=t[0],k=n[1],N=t[1];w>S&&(_=w,w=S,S=_);var E=S-w,A=xo(E-Fo)E;if(!A&&k>N&&(_=k,k=N,N=_),C?A?k+N>0^b[1]<(xo(b[0]-w)Fo^(w<=b[0]&&b[0]<=S)){var z=xt(d,(-y+x)/m);return Mt(z,g),[b,_t(z)]}}}function i(t,e){var r=o?n:Fo-n,i=0;return-r>t?i|=1:t>r&&(i|=2),-r>e?i|=4:e>r&&(i|=8),i}var u=Math.cos(n),o=u>0,a=xo(u)>Uo,l=ve(n,6*Yo);return Rt(t,e,l,o?[0,-n]:[-Fo,n-Fo])}function Yt(n,t,e,r){return function(i){var u,o=i.a,a=i.b,l=o.x,c=o.y,f=a.x,s=a.y,h=0,p=1,g=f-l,v=s-c;if(u=n-l,g||!(u>0)){if(u/=g,0>g){if(h>u)return;p>u&&(p=u)}else if(g>0){if(u>p)return;u>h&&(h=u)}if(u=e-l,g||!(0>u)){if(u/=g,0>g){if(u>p)return;u>h&&(h=u)}else if(g>0){if(h>u)return;p>u&&(p=u)}if(u=t-c,v||!(u>0)){if(u/=v,0>v){if(h>u)return;p>u&&(p=u)}else if(v>0){if(u>p)return;u>h&&(h=u)}if(u=r-c,v||!(0>u)){if(u/=v,0>v){if(u>p)return;u>h&&(h=u)}else 
if(v>0){if(h>u)return;p>u&&(p=u)}return h>0&&(i.a={x:l+h*g,y:c+h*v}),1>p&&(i.b={x:l+p*g,y:c+p*v}),i}}}}}}function Zt(n,t,e,r){function i(r,i){return xo(r[0]-n)0?0:3:xo(r[0]-e)0?2:1:xo(r[1]-t)0?1:0:i>0?3:2}function u(n,t){return o(n.x,t.x)}function o(n,t){var e=i(n,1),r=i(t,1);return e!==r?e-r:0===e?t[1]-n[1]:1===e?n[0]-t[0]:2===e?n[1]-t[1]:t[0]-n[0]}return function(a){function l(n){for(var t=0,e=d.length,r=n[1],i=0;e>i;++i)for(var u,o=1,a=d[i],l=a.length,c=a[0];l>o;++o)u=a[o],c[1]<=r?u[1]>r&&Q(c,u,n)>0&&++t:u[1]<=r&&Q(c,u,n)<0&&--t,c=u;return 0!==t}function c(u,a,l,c){var f=0,s=0;if(null==u||(f=i(u,l))!==(s=i(a,l))||o(u,a)<0^l>0){do c.point(0===f||3===f?n:e,f>1?r:t);while((f=(f+l+4)%4)!==s)}else c.point(a[0],a[1])}function f(i,u){return i>=n&&e>=i&&u>=t&&r>=u}function s(n,t){f(n,t)&&a.point(n,t)}function h(){C.point=g,d&&d.push(y=[]),S=!0,w=!1,b=_=NaN}function p(){v&&(g(m,M),x&&w&&E.rejoin(),v.push(E.buffer())),C.point=s,w&&a.lineEnd()}function g(n,t){n=Math.max(-Ha,Math.min(Ha,n)),t=Math.max(-Ha,Math.min(Ha,t));var e=f(n,t);if(d&&y.push([n,t]),S)m=n,M=t,x=e,S=!1,e&&(a.lineStart(),a.point(n,t));else if(e&&w)a.point(n,t);else{var r={a:{x:b,y:_},b:{x:n,y:t}};A(r)?(w||(a.lineStart(),a.point(r.a.x,r.a.y)),a.point(r.b.x,r.b.y),e||a.lineEnd(),k=!1):e&&(a.lineStart(),a.point(n,t),k=!1)}b=n,_=t,w=e}var v,d,y,m,M,x,b,_,w,S,k,N=a,E=Pt(),A=Yt(n,t,e,r),C={point:s,lineStart:h,lineEnd:p,polygonStart:function(){a=E,v=[],d=[],k=!0},polygonEnd:function(){a=N,v=ao.merge(v);var t=l([n,r]),e=k&&t,i=v.length;(e||i)&&(a.polygonStart(),e&&(a.lineStart(),c(null,null,1,a),a.lineEnd()),i&&Lt(v,u,t,c,a),a.polygonEnd()),v=d=y=null}};return C}}function Vt(n){var t=0,e=Fo/3,r=ae(n),i=r(t,e);return i.parallels=function(n){return arguments.length?r(t=n[0]*Fo/180,e=n[1]*Fo/180):[t/Fo*180,e/Fo*180]},i}function Xt(n,t){function e(n,t){var e=Math.sqrt(u-2*i*Math.sin(t))/i;return[e*Math.sin(n*=i),o-e*Math.cos(n)]}var r=Math.sin(n),i=(r+Math.sin(t))/2,u=1+r*(2*i-r),o=Math.sqrt(u)/i;return 
e.invert=function(n,t){var e=o-t;return[Math.atan2(n,e)/i,tn((u-(n*n+e*e)*i*i)/(2*i))]},e}function $t(){function n(n,t){Ia+=i*n-r*t,r=n,i=t}var t,e,r,i;$a.point=function(u,o){$a.point=n,t=r=u,e=i=o},$a.lineEnd=function(){n(t,e)}}function Bt(n,t){Ya>n&&(Ya=n),n>Va&&(Va=n),Za>t&&(Za=t),t>Xa&&(Xa=t)}function Wt(){function n(n,t){o.push("M",n,",",t,u)}function t(n,t){o.push("M",n,",",t),a.point=e}function e(n,t){o.push("L",n,",",t)}function r(){a.point=n}function i(){o.push("Z")}var u=Jt(4.5),o=[],a={point:n,lineStart:function(){a.point=t},lineEnd:r,polygonStart:function(){a.lineEnd=i},polygonEnd:function(){a.lineEnd=r,a.point=n},pointRadius:function(n){return u=Jt(n),a},result:function(){if(o.length){var n=o.join("");return o=[],n}}};return a}function Jt(n){return"m0,"+n+"a"+n+","+n+" 0 1,1 0,"+-2*n+"a"+n+","+n+" 0 1,1 0,"+2*n+"z"}function Gt(n,t){Ca+=n,za+=t,++La}function Kt(){function n(n,r){var i=n-t,u=r-e,o=Math.sqrt(i*i+u*u);qa+=o*(t+n)/2,Ta+=o*(e+r)/2,Ra+=o,Gt(t=n,e=r)}var t,e;Wa.point=function(r,i){Wa.point=n,Gt(t=r,e=i)}}function Qt(){Wa.point=Gt}function ne(){function n(n,t){var e=n-r,u=t-i,o=Math.sqrt(e*e+u*u);qa+=o*(r+n)/2,Ta+=o*(i+t)/2,Ra+=o,o=i*n-r*t,Da+=o*(r+n),Pa+=o*(i+t),Ua+=3*o,Gt(r=n,i=t)}var t,e,r,i;Wa.point=function(u,o){Wa.point=n,Gt(t=r=u,e=i=o)},Wa.lineEnd=function(){n(t,e)}}function te(n){function t(t,e){n.moveTo(t+o,e),n.arc(t,e,o,0,Ho)}function e(t,e){n.moveTo(t,e),a.point=r}function r(t,e){n.lineTo(t,e)}function i(){a.point=t}function u(){n.closePath()}var o=4.5,a={point:t,lineStart:function(){a.point=e},lineEnd:i,polygonStart:function(){a.lineEnd=u},polygonEnd:function(){a.lineEnd=i,a.point=t},pointRadius:function(n){return o=n,a},result:b};return a}function ee(n){function t(n){return(a?r:e)(n)}function e(t){return ue(t,function(e,r){e=n(e,r),t.point(e[0],e[1])})}function r(t){function e(e,r){e=n(e,r),t.point(e[0],e[1])}function r(){M=NaN,S.point=u,t.lineStart()}function u(e,r){var 
u=dt([e,r]),o=n(e,r);i(M,x,m,b,_,w,M=o[0],x=o[1],m=e,b=u[0],_=u[1],w=u[2],a,t),t.point(M,x)}function o(){S.point=e,t.lineEnd()}function l(){ r(),S.point=c,S.lineEnd=f}function c(n,t){u(s=n,h=t),p=M,g=x,v=b,d=_,y=w,S.point=u}function f(){i(M,x,m,b,_,w,p,g,s,v,d,y,a,t),S.lineEnd=o,o()}var s,h,p,g,v,d,y,m,M,x,b,_,w,S={point:e,lineStart:r,lineEnd:o,polygonStart:function(){t.polygonStart(),S.lineStart=l},polygonEnd:function(){t.polygonEnd(),S.lineStart=r}};return S}function i(t,e,r,a,l,c,f,s,h,p,g,v,d,y){var m=f-t,M=s-e,x=m*m+M*M;if(x>4*u&&d--){var b=a+p,_=l+g,w=c+v,S=Math.sqrt(b*b+_*_+w*w),k=Math.asin(w/=S),N=xo(xo(w)-1)u||xo((m*z+M*L)/x-.5)>.3||o>a*p+l*g+c*v)&&(i(t,e,r,a,l,c,A,C,N,b/=S,_/=S,w,d,y),y.point(A,C),i(A,C,N,b,_,w,f,s,h,p,g,v,d,y))}}var u=.5,o=Math.cos(30*Yo),a=16;return t.precision=function(n){return arguments.length?(a=(u=n*n)>0&&16,t):Math.sqrt(u)},t}function re(n){var t=ee(function(t,e){return n([t*Zo,e*Zo])});return function(n){return le(t(n))}}function ie(n){this.stream=n}function ue(n,t){return{point:t,sphere:function(){n.sphere()},lineStart:function(){n.lineStart()},lineEnd:function(){n.lineEnd()},polygonStart:function(){n.polygonStart()},polygonEnd:function(){n.polygonEnd()}}}function oe(n){return ae(function(){return n})()}function ae(n){function t(n){return n=a(n[0]*Yo,n[1]*Yo),[n[0]*h+l,c-n[1]*h]}function e(n){return n=a.invert((n[0]-l)/h,(c-n[1])/h),n&&[n[0]*Zo,n[1]*Zo]}function r(){a=Ct(o=se(y,M,x),u);var n=u(v,d);return l=p-n[0]*h,c=g+n[1]*h,i()}function i(){return f&&(f.valid=!1,f=null),t}var u,o,a,l,c,f,s=ee(function(n,t){return n=u(n,t),[n[0]*h+l,c-n[1]*h]}),h=150,p=480,g=250,v=0,d=0,y=0,M=0,x=0,b=Fa,_=m,w=null,S=null;return t.stream=function(n){return f&&(f.valid=!1),f=le(b(o,s(_(n)))),f.valid=!0,f},t.clipAngle=function(n){return arguments.length?(b=null==n?(w=n,Fa):It((w=+n)*Yo),i()):w},t.clipExtent=function(n){return arguments.length?(S=n,_=n?Zt(n[0][0],n[0][1],n[1][0],n[1][1]):m,i()):S},t.scale=function(n){return 
arguments.length?(h=+n,r()):h},t.translate=function(n){return arguments.length?(p=+n[0],g=+n[1],r()):[p,g]},t.center=function(n){return arguments.length?(v=n[0]%360*Yo,d=n[1]%360*Yo,r()):[v*Zo,d*Zo]},t.rotate=function(n){return arguments.length?(y=n[0]%360*Yo,M=n[1]%360*Yo,x=n.length>2?n[2]%360*Yo:0,r()):[y*Zo,M*Zo,x*Zo]},ao.rebind(t,s,"precision"),function(){return u=n.apply(this,arguments),t.invert=u.invert&&e,r()}}function le(n){return ue(n,function(t,e){n.point(t*Yo,e*Yo)})}function ce(n,t){return[n,t]}function fe(n,t){return[n>Fo?n-Ho:-Fo>n?n+Ho:n,t]}function se(n,t,e){return n?t||e?Ct(pe(n),ge(t,e)):pe(n):t||e?ge(t,e):fe}function he(n){return function(t,e){return t+=n,[t>Fo?t-Ho:-Fo>t?t+Ho:t,e]}}function pe(n){var t=he(n);return t.invert=he(-n),t}function ge(n,t){function e(n,t){var e=Math.cos(t),a=Math.cos(n)*e,l=Math.sin(n)*e,c=Math.sin(t),f=c*r+a*i;return[Math.atan2(l*u-f*o,a*r-c*i),tn(f*u+l*o)]}var r=Math.cos(n),i=Math.sin(n),u=Math.cos(t),o=Math.sin(t);return e.invert=function(n,t){var e=Math.cos(t),a=Math.cos(n)*e,l=Math.sin(n)*e,c=Math.sin(t),f=c*u-l*o;return[Math.atan2(l*u+c*o,a*r+f*i),tn(f*r-a*i)]},e}function ve(n,t){var e=Math.cos(n),r=Math.sin(n);return function(i,u,o,a){var l=o*t;null!=i?(i=de(e,i),u=de(e,u),(o>0?u>i:i>u)&&(i+=o*Ho)):(i=n+o*Ho,u=n-.5*l);for(var c,f=i;o>0?f>u:u>f;f-=l)a.point((c=_t([e,-r*Math.cos(f),-r*Math.sin(f)]))[0],c[1])}}function de(n,t){var e=dt(t);e[0]-=n,bt(e);var r=nn(-e[1]);return((-e[2]<0?-r:r)+2*Math.PI-Uo)%(2*Math.PI)}function ye(n,t,e){var r=ao.range(n,t-Uo,e).concat(t);return function(n){return r.map(function(t){return[n,t]})}}function me(n,t,e){var r=ao.range(n,t-Uo,e).concat(t);return function(n){return r.map(function(t){return[t,n]})}}function Me(n){return n.source}function xe(n){return n.target}function be(n,t,e,r){var 
i=Math.cos(t),u=Math.sin(t),o=Math.cos(r),a=Math.sin(r),l=i*Math.cos(n),c=i*Math.sin(n),f=o*Math.cos(e),s=o*Math.sin(e),h=2*Math.asin(Math.sqrt(on(r-t)+i*o*on(e-n))),p=1/Math.sin(h),g=h?function(n){var t=Math.sin(n*=h)*p,e=Math.sin(h-n)*p,r=e*l+t*f,i=e*c+t*s,o=e*u+t*a;return[Math.atan2(i,r)*Zo,Math.atan2(o,Math.sqrt(r*r+i*i))*Zo]}:function(){return[n*Zo,t*Zo]};return g.distance=h,g}function _e(){function n(n,i){var u=Math.sin(i*=Yo),o=Math.cos(i),a=xo((n*=Yo)-t),l=Math.cos(a);Ja+=Math.atan2(Math.sqrt((a=o*Math.sin(a))*a+(a=r*u-e*o*l)*a),e*u+r*o*l),t=n,e=u,r=o}var t,e,r;Ga.point=function(i,u){t=i*Yo,e=Math.sin(u*=Yo),r=Math.cos(u),Ga.point=n},Ga.lineEnd=function(){Ga.point=Ga.lineEnd=b}}function we(n,t){function e(t,e){var r=Math.cos(t),i=Math.cos(e),u=n(r*i);return[u*i*Math.sin(t),u*Math.sin(e)]}return e.invert=function(n,e){var r=Math.sqrt(n*n+e*e),i=t(r),u=Math.sin(i),o=Math.cos(i);return[Math.atan2(n*u,r*o),Math.asin(r&&e*u/r)]},e}function Se(n,t){function e(n,t){o>0?-Io+Uo>t&&(t=-Io+Uo):t>Io-Uo&&(t=Io-Uo);var e=o/Math.pow(i(t),u);return[e*Math.sin(u*n),o-e*Math.cos(u*n)]}var r=Math.cos(n),i=function(n){return Math.tan(Fo/4+n/2)},u=n===t?Math.sin(n):Math.log(r/Math.cos(t))/Math.log(i(t)/i(n)),o=r*Math.pow(i(n),u)/u;return u?(e.invert=function(n,t){var e=o-t,r=K(u)*Math.sqrt(n*n+e*e);return[Math.atan2(n,e)/u,2*Math.atan(Math.pow(o/r,1/u))-Io]},e):Ne}function ke(n,t){function e(n,t){var e=u-t;return[e*Math.sin(i*n),u-e*Math.cos(i*n)]}var r=Math.cos(n),i=n===t?Math.sin(n):(r-Math.cos(t))/(t-n),u=r/i+n;return xo(i)i;i++){for(;r>1&&Q(n[e[r-2]],n[e[r-1]],n[i])<=0;)--r;e[r++]=i}return e.slice(0,r)}function qe(n,t){return n[0]-t[0]||n[1]-t[1]}function Te(n,t,e){return(e[0]-t[0])*(n[1]-t[1])<(e[1]-t[1])*(n[0]-t[0])}function Re(n,t,e,r){var i=n[0],u=e[0],o=t[0]-i,a=r[0]-u,l=n[1],c=e[1],f=t[1]-l,s=r[1]-c,h=(a*(l-c)-s*(i-u))/(s*o-a*f);return[i+h*o,l+h*f]}function De(n){var t=n[0],e=n[n.length-1];return!(t[0]-e[0]||t[1]-e[1])}function 
Pe(){rr(this),this.edge=this.site=this.circle=null}function Ue(n){var t=cl.pop()||new Pe;return t.site=n,t}function je(n){Be(n),ol.remove(n),cl.push(n),rr(n)}function Fe(n){var t=n.circle,e=t.x,r=t.cy,i={x:e,y:r},u=n.P,o=n.N,a=[n];je(n);for(var l=u;l.circle&&xo(e-l.circle.x)f;++f)c=a[f],l=a[f-1],nr(c.edge,l.site,c.site,i);l=a[0],c=a[s-1],c.edge=Ke(l.site,c.site,null,i),$e(l),$e(c)}function He(n){for(var t,e,r,i,u=n.x,o=n.y,a=ol._;a;)if(r=Oe(a,o)-u,r>Uo)a=a.L;else{if(i=u-Ie(a,o),!(i>Uo)){r>-Uo?(t=a.P,e=a):i>-Uo?(t=a,e=a.N):t=e=a;break}if(!a.R){t=a;break}a=a.R}var l=Ue(n);if(ol.insert(t,l),t||e){if(t===e)return Be(t),e=Ue(t.site),ol.insert(l,e),l.edge=e.edge=Ke(t.site,l.site),$e(t),void $e(e);if(!e)return void(l.edge=Ke(t.site,l.site));Be(t),Be(e);var c=t.site,f=c.x,s=c.y,h=n.x-f,p=n.y-s,g=e.site,v=g.x-f,d=g.y-s,y=2*(h*d-p*v),m=h*h+p*p,M=v*v+d*d,x={x:(d*m-p*M)/y+f,y:(h*M-v*m)/y+s};nr(e.edge,c,g,x),l.edge=Ke(c,n,null,x),e.edge=Ke(n,g,null,x),$e(t),$e(e)}}function Oe(n,t){var e=n.site,r=e.x,i=e.y,u=i-t;if(!u)return r;var o=n.P;if(!o)return-(1/0);e=o.site;var a=e.x,l=e.y,c=l-t;if(!c)return a;var f=a-r,s=1/u-1/c,h=f/c;return s?(-h+Math.sqrt(h*h-2*s*(f*f/(-2*c)-l+c/2+i-u/2)))/s+r:(r+a)/2}function Ie(n,t){var e=n.N;if(e)return Oe(e,t);var r=n.site;return r.y===t?r.x:1/0}function Ye(n){this.site=n,this.edges=[]}function Ze(n){for(var t,e,r,i,u,o,a,l,c,f,s=n[0][0],h=n[1][0],p=n[0][1],g=n[1][1],v=ul,d=v.length;d--;)if(u=v[d],u&&u.prepare())for(a=u.edges,l=a.length,o=0;l>o;)f=a[o].end(),r=f.x,i=f.y,c=a[++o%l].start(),t=c.x,e=c.y,(xo(r-t)>Uo||xo(i-e)>Uo)&&(a.splice(o,0,new tr(Qe(u.site,f,xo(r-s)Uo?{x:s,y:xo(t-s)Uo?{x:xo(e-g)Uo?{x:h,y:xo(t-h)Uo?{x:xo(e-p)=-jo)){var p=l*l+c*c,g=f*f+s*s,v=(s*p-c*g)/h,d=(l*g-f*p)/h,s=d+a,y=fl.pop()||new Xe;y.arc=n,y.site=i,y.x=v+o,y.y=s+Math.sqrt(v*v+d*d),y.cy=s,n.circle=y;for(var m=null,M=ll._;M;)if(y.yd||d>=a)return;if(h>g){if(u){if(u.y>=c)return}else u={x:d,y:l};e={x:d,y:c}}else{if(u){if(u.yr||r>1)if(h>g){if(u){if(u.y>=c)return}else 
u={x:(l-i)/r,y:l};e={x:(c-i)/r,y:c}}else{if(u){if(u.yp){if(u){if(u.x>=a)return}else u={x:o,y:r*o+i};e={x:a,y:r*a+i}}else{if(u){if(u.xu||s>o||r>h||i>p)){if(g=n.point){var g,v=t-n.x,d=e-n.y,y=v*v+d*d;if(l>y){var m=Math.sqrt(l=y);r=t-m,i=e-m,u=t+m,o=e+m,a=g}}for(var M=n.nodes,x=.5*(f+h),b=.5*(s+p),_=t>=x,w=e>=b,S=w<<1|_,k=S+4;k>S;++S)if(n=M[3&S])switch(3&S){case 0:c(n,f,s,x,b);break;case 1:c(n,x,s,h,b);break;case 2:c(n,f,b,x,p);break;case 3:c(n,x,b,h,p)}}}(n,r,i,u,o),a}function vr(n,t){n=ao.rgb(n),t=ao.rgb(t);var e=n.r,r=n.g,i=n.b,u=t.r-e,o=t.g-r,a=t.b-i;return function(n){return"#"+bn(Math.round(e+u*n))+bn(Math.round(r+o*n))+bn(Math.round(i+a*n))}}function dr(n,t){var e,r={},i={};for(e in n)e in t?r[e]=Mr(n[e],t[e]):i[e]=n[e];for(e in t)e in n||(i[e]=t[e]);return function(n){for(e in r)i[e]=r[e](n);return i}}function yr(n,t){return n=+n,t=+t,function(e){return n*(1-e)+t*e}}function mr(n,t){var e,r,i,u=hl.lastIndex=pl.lastIndex=0,o=-1,a=[],l=[];for(n+="",t+="";(e=hl.exec(n))&&(r=pl.exec(t));)(i=r.index)>u&&(i=t.slice(u,i),a[o]?a[o]+=i:a[++o]=i),(e=e[0])===(r=r[0])?a[o]?a[o]+=r:a[++o]=r:(a[++o]=null,l.push({i:o,x:yr(e,r)})),u=pl.lastIndex;return ur;++r)a[(e=l[r]).i]=e.x(n);return a.join("")})}function Mr(n,t){for(var e,r=ao.interpolators.length;--r>=0&&!(e=ao.interpolators[r](n,t)););return e}function xr(n,t){var e,r=[],i=[],u=n.length,o=t.length,a=Math.min(n.length,t.length);for(e=0;a>e;++e)r.push(Mr(n[e],t[e]));for(;u>e;++e)i[e]=n[e];for(;o>e;++e)i[e]=t[e];return function(n){for(e=0;a>e;++e)i[e]=r[e](n);return i}}function br(n){return function(t){return 0>=t?0:t>=1?1:n(t)}}function _r(n){return function(t){return 1-n(1-t)}}function wr(n){return function(t){return.5*(.5>t?n(2*t):2-n(2-2*t))}}function Sr(n){return n*n}function kr(n){return n*n*n}function Nr(n){if(0>=n)return 0;if(n>=1)return 1;var t=n*n,e=t*n;return 4*(.5>n?e:3*(n-t)+e-.75)}function Er(n){return function(t){return Math.pow(t,n)}}function Ar(n){return 1-Math.cos(n*Io)}function Cr(n){return 
Math.pow(2,10*(n-1))}function zr(n){return 1-Math.sqrt(1-n*n)}function Lr(n,t){var e;return arguments.length<2&&(t=.45),arguments.length?e=t/Ho*Math.asin(1/n):(n=1,e=t/4),function(r){return 1+n*Math.pow(2,-10*r)*Math.sin((r-e)*Ho/t)}}function qr(n){return n||(n=1.70158),function(t){return t*t*((n+1)*t-n)}}function Tr(n){return 1/2.75>n?7.5625*n*n:2/2.75>n?7.5625*(n-=1.5/2.75)*n+.75:2.5/2.75>n?7.5625*(n-=2.25/2.75)*n+.9375:7.5625*(n-=2.625/2.75)*n+.984375}function Rr(n,t){n=ao.hcl(n),t=ao.hcl(t);var e=n.h,r=n.c,i=n.l,u=t.h-e,o=t.c-r,a=t.l-i;return isNaN(o)&&(o=0,r=isNaN(r)?t.c:r),isNaN(u)?(u=0,e=isNaN(e)?t.h:e):u>180?u-=360:-180>u&&(u+=360),function(n){return sn(e+u*n,r+o*n,i+a*n)+""}}function Dr(n,t){n=ao.hsl(n),t=ao.hsl(t);var e=n.h,r=n.s,i=n.l,u=t.h-e,o=t.s-r,a=t.l-i;return isNaN(o)&&(o=0,r=isNaN(r)?t.s:r),isNaN(u)?(u=0,e=isNaN(e)?t.h:e):u>180?u-=360:-180>u&&(u+=360),function(n){return cn(e+u*n,r+o*n,i+a*n)+""}}function Pr(n,t){n=ao.lab(n),t=ao.lab(t);var e=n.l,r=n.a,i=n.b,u=t.l-e,o=t.a-r,a=t.b-i;return function(n){return pn(e+u*n,r+o*n,i+a*n)+""}}function Ur(n,t){return t-=n,function(e){return Math.round(n+t*e)}}function jr(n){var t=[n.a,n.b],e=[n.c,n.d],r=Hr(t),i=Fr(t,e),u=Hr(Or(e,t,-i))||0;t[0]*e[1]180?t+=360:t-n>180&&(n+=360),r.push({i:e.push(Ir(e)+"rotate(",null,")")-2,x:yr(n,t)})):t&&e.push(Ir(e)+"rotate("+t+")")}function Vr(n,t,e,r){n!==t?r.push({i:e.push(Ir(e)+"skewX(",null,")")-2,x:yr(n,t)}):t&&e.push(Ir(e)+"skewX("+t+")")}function Xr(n,t,e,r){if(n[0]!==t[0]||n[1]!==t[1]){var i=e.push(Ir(e)+"scale(",null,",",null,")");r.push({i:i-4,x:yr(n[0],t[0])},{i:i-2,x:yr(n[1],t[1])})}else 1===t[0]&&1===t[1]||e.push(Ir(e)+"scale("+t+")")}function $r(n,t){var e=[],r=[];return n=ao.transform(n),t=ao.transform(t),Yr(n.translate,t.translate,e,r),Zr(n.rotate,t.rotate,e,r),Vr(n.skew,t.skew,e,r),Xr(n.scale,t.scale,e,r),n=t=null,function(n){for(var t,i=-1,u=r.length;++i=0;)e.push(i[r])}function oi(n,t){for(var 
e=[n],r=[];null!=(n=e.pop());)if(r.push(n),(u=n.children)&&(i=u.length))for(var i,u,o=-1;++oe;++e)(t=n[e][1])>i&&(r=e,i=t);return r}function yi(n){return n.reduce(mi,0)}function mi(n,t){return n+t[1]}function Mi(n,t){return xi(n,Math.ceil(Math.log(t.length)/Math.LN2+1))}function xi(n,t){for(var e=-1,r=+n[0],i=(n[1]-r)/t,u=[];++e<=t;)u[e]=i*e+r;return u}function bi(n){return[ao.min(n),ao.max(n)]}function _i(n,t){return n.value-t.value}function wi(n,t){var e=n._pack_next;n._pack_next=t,t._pack_prev=n,t._pack_next=e,e._pack_prev=t}function Si(n,t){n._pack_next=t,t._pack_prev=n}function ki(n,t){var e=t.x-n.x,r=t.y-n.y,i=n.r+t.r;return.999*i*i>e*e+r*r}function Ni(n){function t(n){f=Math.min(n.x-n.r,f),s=Math.max(n.x+n.r,s),h=Math.min(n.y-n.r,h),p=Math.max(n.y+n.r,p)}if((e=n.children)&&(c=e.length)){var e,r,i,u,o,a,l,c,f=1/0,s=-(1/0),h=1/0,p=-(1/0);if(e.forEach(Ei),r=e[0],r.x=-r.r,r.y=0,t(r),c>1&&(i=e[1],i.x=i.r,i.y=0,t(i),c>2))for(u=e[2],zi(r,i,u),t(u),wi(r,u),r._pack_prev=u,wi(u,i),i=r._pack_next,o=3;c>o;o++){zi(r,i,u=e[o]);var g=0,v=1,d=1;for(a=i._pack_next;a!==i;a=a._pack_next,v++)if(ki(a,u)){g=1;break}if(1==g)for(l=r._pack_prev;l!==a._pack_prev&&!ki(l,u);l=l._pack_prev,d++);g?(d>v||v==d&&i.ro;o++)u=e[o],u.x-=y,u.y-=m,M=Math.max(M,u.r+Math.sqrt(u.x*u.x+u.y*u.y));n.r=M,e.forEach(Ai)}}function Ei(n){n._pack_next=n._pack_prev=n}function Ai(n){delete n._pack_next,delete n._pack_prev}function Ci(n,t,e,r){var i=n.children;if(n.x=t+=r*n.x,n.y=e+=r*n.y,n.r*=r,i)for(var u=-1,o=i.length;++u=0;)t=i[u],t.z+=e,t.m+=e,e+=t.s+(r+=t.c)}function Pi(n,t,e){return n.a.parent===t.parent?n.a:e}function Ui(n){return 1+ao.max(n,function(n){return n.y})}function ji(n){return n.reduce(function(n,t){return n+t.x},0)/n.length}function Fi(n){var t=n.children;return t&&t.length?Fi(t[0]):n}function Hi(n){var t,e=n.children;return e&&(t=e.length)?Hi(e[t-1]):n}function Oi(n){return{x:n.x,y:n.y,dx:n.dx,dy:n.dy}}function Ii(n,t){var e=n.x+t[3],r=n.y+t[0],i=n.dx-t[1]-t[3],u=n.dy-t[0]-t[2];return 
0>i&&(e+=i/2,i=0),0>u&&(r+=u/2,u=0),{x:e,y:r,dx:i,dy:u}}function Yi(n){var t=n[0],e=n[n.length-1];return e>t?[t,e]:[e,t]}function Zi(n){return n.rangeExtent?n.rangeExtent():Yi(n.range())}function Vi(n,t,e,r){var i=e(n[0],n[1]),u=r(t[0],t[1]);return function(n){return u(i(n))}}function Xi(n,t){var e,r=0,i=n.length-1,u=n[r],o=n[i];return u>o&&(e=r,r=i,i=e,e=u,u=o,o=e),n[r]=t.floor(u),n[i]=t.ceil(o),n}function $i(n){return n?{floor:function(t){return Math.floor(t/n)*n},ceil:function(t){return Math.ceil(t/n)*n}}:Sl}function Bi(n,t,e,r){var i=[],u=[],o=0,a=Math.min(n.length,t.length)-1;for(n[a]2?Bi:Vi,l=r?Wr:Br;return o=i(n,t,l,e),a=i(t,n,l,Mr),u}function u(n){return o(n)}var o,a;return u.invert=function(n){return a(n)},u.domain=function(t){return arguments.length?(n=t.map(Number),i()):n},u.range=function(n){return arguments.length?(t=n,i()):t},u.rangeRound=function(n){return u.range(n).interpolate(Ur)},u.clamp=function(n){return arguments.length?(r=n,i()):r},u.interpolate=function(n){return arguments.length?(e=n,i()):e},u.ticks=function(t){return Qi(n,t)},u.tickFormat=function(t,e){return nu(n,t,e)},u.nice=function(t){return Gi(n,t),i()},u.copy=function(){return Wi(n,t,e,r)},i()}function Ji(n,t){return ao.rebind(n,t,"range","rangeRound","interpolate","clamp")}function Gi(n,t){return Xi(n,$i(Ki(n,t)[2])),Xi(n,$i(Ki(n,t)[2])),n}function Ki(n,t){null==t&&(t=10);var e=Yi(n),r=e[1]-e[0],i=Math.pow(10,Math.floor(Math.log(r/t)/Math.LN10)),u=t/r*i;return.15>=u?i*=10:.35>=u?i*=5:.75>=u&&(i*=2),e[0]=Math.ceil(e[0]/i)*i,e[1]=Math.floor(e[1]/i)*i+.5*i,e[2]=i,e}function Qi(n,t){return ao.range.apply(ao,Ki(n,t))}function nu(n,t,e){var r=Ki(n,t);if(e){var i=ha.exec(e);if(i.shift(),"s"===i[8]){var u=ao.formatPrefix(Math.max(xo(r[0]),xo(r[1])));return i[7]||(i[7]="."+tu(u.scale(r[2]))),i[8]="f",e=ao.format(i.join("")),function(n){return e(u.scale(n))+u.symbol}}i[7]||(i[7]="."+eu(i[8],r)),e=i.join("")}else e=",."+tu(r[2])+"f";return ao.format(e)}function 
tu(n){return-Math.floor(Math.log(n)/Math.LN10+.01)}function eu(n,t){var e=tu(t[2]);return n in kl?Math.abs(e-tu(Math.max(xo(t[0]),xo(t[1]))))+ +("e"!==n):e-2*("%"===n)}function ru(n,t,e,r){function i(n){return(e?Math.log(0>n?0:n):-Math.log(n>0?0:-n))/Math.log(t)}function u(n){return e?Math.pow(t,n):-Math.pow(t,-n)}function o(t){return n(i(t))}return o.invert=function(t){return u(n.invert(t))},o.domain=function(t){return arguments.length?(e=t[0]>=0,n.domain((r=t.map(Number)).map(i)),o):r},o.base=function(e){return arguments.length?(t=+e,n.domain(r.map(i)),o):t},o.nice=function(){var t=Xi(r.map(i),e?Math:El);return n.domain(t),r=t.map(u),o},o.ticks=function(){var n=Yi(r),o=[],a=n[0],l=n[1],c=Math.floor(i(a)),f=Math.ceil(i(l)),s=t%1?2:t;if(isFinite(f-c)){if(e){for(;f>c;c++)for(var h=1;s>h;h++)o.push(u(c)*h);o.push(u(c))}else for(o.push(u(c));c++0;h--)o.push(u(c)*h);for(c=0;o[c]l;f--);o=o.slice(c,f)}return o},o.tickFormat=function(n,e){if(!arguments.length)return Nl;arguments.length<2?e=Nl:"function"!=typeof e&&(e=ao.format(e));var r=Math.max(1,t*n/o.ticks().length);return function(n){var o=n/u(Math.round(i(n)));return t-.5>o*t&&(o*=t),r>=o?e(n):""}},o.copy=function(){return ru(n.copy(),t,e,r)},Ji(o,n)}function iu(n,t,e){function r(t){return n(i(t))}var i=uu(t),u=uu(1/t);return r.invert=function(t){return u(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain((e=t.map(Number)).map(i)),r):e},r.ticks=function(n){return Qi(e,n)},r.tickFormat=function(n,t){return nu(e,n,t)},r.nice=function(n){return r.domain(Gi(e,n))},r.exponent=function(o){return arguments.length?(i=uu(t=o),u=uu(1/t),n.domain(e.map(i)),r):t},r.copy=function(){return iu(n.copy(),t,e)},Ji(r,n)}function uu(n){return function(t){return 0>t?-Math.pow(-t,n):Math.pow(t,n)}}function ou(n,t){function e(e){return u[((i.get(e)||("range"===t.t?i.set(e,n.push(e)):NaN))-1)%u.length]}function r(t,e){return ao.range(n.length).map(function(n){return t+e*n})}var i,u,o;return 
e.domain=function(r){if(!arguments.length)return n;n=[],i=new c;for(var u,o=-1,a=r.length;++oe?[NaN,NaN]:[e>0?a[e-1]:n[0],et?NaN:t/u+n,[t,t+1/u]},r.copy=function(){return lu(n,t,e)},i()}function cu(n,t){function e(e){return e>=e?t[ao.bisect(n,e)]:void 0}return e.domain=function(t){return arguments.length?(n=t,e):n},e.range=function(n){return arguments.length?(t=n,e):t},e.invertExtent=function(e){return e=t.indexOf(e),[n[e-1],n[e]]},e.copy=function(){return cu(n,t)},e}function fu(n){function t(n){return+n}return t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=e.map(t),t):n},t.ticks=function(t){return Qi(n,t)},t.tickFormat=function(t,e){return nu(n,t,e)},t.copy=function(){return fu(n)},t}function su(){return 0}function hu(n){return n.innerRadius}function pu(n){return n.outerRadius}function gu(n){return n.startAngle}function vu(n){return n.endAngle}function du(n){return n&&n.padAngle}function yu(n,t,e,r){return(n-e)*t-(t-r)*n>0?0:1}function mu(n,t,e,r,i){var u=n[0]-t[0],o=n[1]-t[1],a=(i?r:-r)/Math.sqrt(u*u+o*o),l=a*o,c=-a*u,f=n[0]+l,s=n[1]+c,h=t[0]+l,p=t[1]+c,g=(f+h)/2,v=(s+p)/2,d=h-f,y=p-s,m=d*d+y*y,M=e-r,x=f*p-h*s,b=(0>y?-1:1)*Math.sqrt(Math.max(0,M*M*m-x*x)),_=(x*y-d*b)/m,w=(-x*d-y*b)/m,S=(x*y+d*b)/m,k=(-x*d+y*b)/m,N=_-g,E=w-v,A=S-g,C=k-v;return N*N+E*E>A*A+C*C&&(_=S,w=k),[[_-l,w-c],[_*e/M,w*e/M]]}function Mu(n){function t(t){function o(){c.push("M",u(n(f),a))}for(var l,c=[],f=[],s=-1,h=t.length,p=En(e),g=En(r);++s1?n.join("L"):n+"Z"}function bu(n){return n.join("L")+"Z"}function _u(n){for(var t=0,e=n.length,r=n[0],i=[r[0],",",r[1]];++t1&&i.push("H",r[0]),i.join("")}function wu(n){for(var t=0,e=n.length,r=n[0],i=[r[0],",",r[1]];++t1){a=t[1],u=n[l],l++,r+="C"+(i[0]+o[0])+","+(i[1]+o[1])+","+(u[0]-a[0])+","+(u[1]-a[1])+","+u[0]+","+u[1];for(var c=2;c9&&(i=3*t/Math.sqrt(i),o[a]=i*e,o[a+1]=i*r));for(a=-1;++a<=l;)i=(n[Math.min(l,a+1)][0]-n[Math.max(0,a-1)][0])/(6*(1+o[a]*o[a])),u.push([i||0,o[a]*i||0]);return u}function Fu(n){return 
n.length<3?xu(n):n[0]+Au(n,ju(n))}function Hu(n){for(var t,e,r,i=-1,u=n.length;++i=t?o(n-t):void(f.c=o)}function o(e){var i=g.active,u=g[i];u&&(u.timer.c=null,u.timer.t=NaN,--g.count,delete g[i],u.event&&u.event.interrupt.call(n,n.__data__,u.index));for(var o in g)if(r>+o){var c=g[o];c.timer.c=null,c.timer.t=NaN,--g.count,delete g[o]}f.c=a,qn(function(){return f.c&&a(e||1)&&(f.c=null,f.t=NaN),1},0,l),g.active=r,v.event&&v.event.start.call(n,n.__data__,t),p=[],v.tween.forEach(function(e,r){(r=r.call(n,n.__data__,t))&&p.push(r)}),h=v.ease,s=v.duration}function a(i){for(var u=i/s,o=h(u),a=p.length;a>0;)p[--a].call(n,o);return u>=1?(v.event&&v.event.end.call(n,n.__data__,t),--g.count?delete g[r]:delete n[e],1):void 0}var l,f,s,h,p,g=n[e]||(n[e]={active:0,count:0}),v=g[r];v||(l=i.time,f=qn(u,0,l),v=g[r]={tween:new c,time:l,timer:f,delay:i.delay,duration:i.duration,ease:i.ease,index:t},i=null,++g.count)}function no(n,t,e){n.attr("transform",function(n){var r=t(n);return"translate("+(isFinite(r)?r:e(n))+",0)"})}function to(n,t,e){n.attr("transform",function(n){var r=t(n);return"translate(0,"+(isFinite(r)?r:e(n))+")"})}function eo(n){return n.toISOString()}function ro(n,t,e){function r(t){return n(t)}function i(n,e){var r=n[1]-n[0],i=r/e,u=ao.bisect(Kl,i);return u==Kl.length?[t.year,Ki(n.map(function(n){return n/31536e6}),e)[2]]:u?t[i/Kl[u-1]1?{floor:function(t){for(;e(t=n.floor(t));)t=io(t-1);return t},ceil:function(t){for(;e(t=n.ceil(t));)t=io(+t+1);return t}}:n))},r.ticks=function(n,t){var e=Yi(r.domain()),u=null==n?i(e,10):"number"==typeof n?i(e,n):!n.range&&[{range:n},t];return u&&(n=u[0],t=u[1]),n.range(e[0],io(+e[1]+1),1>t?1:t)},r.tickFormat=function(){return e},r.copy=function(){return ro(n.copy(),t,e)},Ji(r,n)}function io(n){return new Date(n)}function uo(n){return JSON.parse(n.responseText)}function oo(n){var t=fo.createRange();return t.selectNode(fo.body),t.createContextualFragment(n.responseText)}var ao={version:"3.5.17"},lo=[].slice,co=function(n){return 
lo.call(n)},fo=this.document;if(fo)try{co(fo.documentElement.childNodes)[0].nodeType}catch(so){co=function(n){for(var t=n.length,e=new Array(t);t--;)e[t]=n[t];return e}}if(Date.now||(Date.now=function(){return+new Date}),fo)try{fo.createElement("DIV").style.setProperty("opacity",0,"")}catch(ho){var po=this.Element.prototype,go=po.setAttribute,vo=po.setAttributeNS,yo=this.CSSStyleDeclaration.prototype,mo=yo.setProperty;po.setAttribute=function(n,t){go.call(this,n,t+"")},po.setAttributeNS=function(n,t,e){vo.call(this,n,t,e+"")},yo.setProperty=function(n,t,e){mo.call(this,n,t+"",e)}}ao.ascending=e,ao.descending=function(n,t){return n>t?-1:t>n?1:t>=n?0:NaN},ao.min=function(n,t){var e,r,i=-1,u=n.length;if(1===arguments.length){for(;++i=r){e=r;break}for(;++ir&&(e=r)}else{for(;++i=r){e=r;break}for(;++ir&&(e=r)}return e},ao.max=function(n,t){var e,r,i=-1,u=n.length;if(1===arguments.length){for(;++i=r){e=r;break}for(;++ie&&(e=r)}else{for(;++i=r){e=r;break}for(;++ie&&(e=r)}return e},ao.extent=function(n,t){var e,r,i,u=-1,o=n.length;if(1===arguments.length){for(;++u=r){e=i=r;break}for(;++ur&&(e=r),r>i&&(i=r))}else{for(;++u=r){e=i=r;break}for(;++ur&&(e=r),r>i&&(i=r))}return[e,i]},ao.sum=function(n,t){var e,r=0,u=n.length,o=-1;if(1===arguments.length)for(;++o1?l/(f-1):void 0},ao.deviation=function(){var n=ao.variance.apply(this,arguments);return n?Math.sqrt(n):n};var Mo=u(e);ao.bisectLeft=Mo.left,ao.bisect=ao.bisectRight=Mo.right,ao.bisector=function(n){return u(1===n.length?function(t,r){return e(n(t),r)}:n)},ao.shuffle=function(n,t,e){(u=arguments.length)<3&&(e=n.length,2>u&&(t=0));for(var r,i,u=e-t;u;)i=Math.random()*u--|0,r=n[u+t],n[u+t]=n[i+t],n[i+t]=r;return n},ao.permute=function(n,t){for(var e=t.length,r=new Array(e);e--;)r[e]=n[t[e]];return r},ao.pairs=function(n){for(var t,e=0,r=n.length-1,i=n[0],u=new Array(0>r?0:r);r>e;)u[e]=[t=i,i=n[++e]];return u},ao.transpose=function(n){if(!(i=n.length))return[];for(var t=-1,e=ao.min(n,o),r=new 
Array(e);++t=0;)for(r=n[i],t=r.length;--t>=0;)e[--o]=r[t];return e};var xo=Math.abs;ao.range=function(n,t,e){if(arguments.length<3&&(e=1,arguments.length<2&&(t=n,n=0)),(t-n)/e===1/0)throw new Error("infinite range");var r,i=[],u=a(xo(e)),o=-1;if(n*=u,t*=u,e*=u,0>e)for(;(r=n+e*++o)>t;)i.push(r/u);else for(;(r=n+e*++o)=u.length)return r?r.call(i,o):e?o.sort(e):o;for(var l,f,s,h,p=-1,g=o.length,v=u[a++],d=new c;++p=u.length)return n;var r=[],i=o[e++];return n.forEach(function(n,i){r.push({key:n,values:t(i,e)})}),i?r.sort(function(n,t){return i(n.key,t.key)}):r}var e,r,i={},u=[],o=[];return i.map=function(t,e){return n(e,t,0)},i.entries=function(e){return t(n(ao.map,e,0),0)},i.key=function(n){return u.push(n),i},i.sortKeys=function(n){return o[u.length-1]=n,i},i.sortValues=function(n){return e=n,i},i.rollup=function(n){return r=n,i},i},ao.set=function(n){var t=new y;if(n)for(var e=0,r=n.length;r>e;++e)t.add(n[e]);return t},l(y,{has:h,add:function(n){return this._[f(n+="")]=!0,n},remove:p,values:g,size:v,empty:d,forEach:function(n){for(var t in this._)n.call(this,s(t))}}),ao.behavior={},ao.rebind=function(n,t){for(var e,r=1,i=arguments.length;++r=0&&(r=n.slice(e+1),n=n.slice(0,e)),n)return arguments.length<2?this[n].on(r):this[n].on(r,t);if(2===arguments.length){if(null==t)for(n in this)this.hasOwnProperty(n)&&this[n].on(r,null);return this}},ao.event=null,ao.requote=function(n){return n.replace(So,"\\$&")};var So=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,ko={}.__proto__?function(n,t){n.__proto__=t}:function(n,t){for(var e in t)n[e]=t[e]},No=function(n,t){return t.querySelector(n)},Eo=function(n,t){return t.querySelectorAll(n)},Ao=function(n,t){var e=n.matches||n[x(n,"matchesSelector")];return(Ao=function(n,t){return e.call(n,t)})(n,t)};"function"==typeof Sizzle&&(No=function(n,t){return Sizzle(n,t)[0]||null},Eo=Sizzle,Ao=Sizzle.matchesSelector),ao.selection=function(){return ao.select(fo.documentElement)};var Co=ao.selection.prototype=[];Co.select=function(n){var 
t,e,r,i,u=[];n=A(n);for(var o=-1,a=this.length;++o=0&&"xmlns"!==(e=n.slice(0,t))&&(n=n.slice(t+1)),Lo.hasOwnProperty(e)?{space:Lo[e],local:n}:n}},Co.attr=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node();return n=ao.ns.qualify(n),n.local?e.getAttributeNS(n.space,n.local):e.getAttribute(n)}for(t in n)this.each(z(t,n[t]));return this}return this.each(z(n,t))},Co.classed=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node(),r=(n=T(n)).length,i=-1;if(t=e.classList){for(;++ii){if("string"!=typeof n){2>i&&(e="");for(r in n)this.each(P(r,n[r],e));return this}if(2>i){var u=this.node();return t(u).getComputedStyle(u,null).getPropertyValue(n)}r=""}return this.each(P(n,e,r))},Co.property=function(n,t){if(arguments.length<2){if("string"==typeof n)return this.node()[n];for(t in n)this.each(U(t,n[t]));return this}return this.each(U(n,t))},Co.text=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.textContent=null==t?"":t}:null==n?function(){this.textContent=""}:function(){this.textContent=n}):this.node().textContent},Co.html=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.innerHTML=null==t?"":t}:null==n?function(){this.innerHTML=""}:function(){this.innerHTML=n}):this.node().innerHTML},Co.append=function(n){return n=j(n),this.select(function(){return this.appendChild(n.apply(this,arguments))})},Co.insert=function(n,t){return n=j(n),t=A(t),this.select(function(){return this.insertBefore(n.apply(this,arguments),t.apply(this,arguments)||null)})},Co.remove=function(){return this.each(F)},Co.data=function(n,t){function e(n,e){var r,i,u,o=n.length,s=e.length,h=Math.min(o,s),p=new Array(s),g=new Array(s),v=new Array(o);if(t){var d,y=new c,m=new Array(o);for(r=-1;++rr;++r)g[r]=H(e[r]);for(;o>r;++r)v[r]=n[r]}g.update=p,g.parentNode=p.parentNode=v.parentNode=n.parentNode,a.push(g),l.push(p),f.push(v)}var 
r,i,u=-1,o=this.length;if(!arguments.length){for(n=new Array(o=(r=this[0]).length);++uu;u++){i.push(t=[]),t.parentNode=(e=this[u]).parentNode;for(var a=0,l=e.length;l>a;a++)(r=e[a])&&n.call(r,r.__data__,a,u)&&t.push(r)}return E(i)},Co.order=function(){for(var n=-1,t=this.length;++n=0;)(e=r[i])&&(u&&u!==e.nextSibling&&u.parentNode.insertBefore(e,u),u=e);return this},Co.sort=function(n){n=I.apply(this,arguments);for(var t=-1,e=this.length;++tn;n++)for(var e=this[n],r=0,i=e.length;i>r;r++){var u=e[r];if(u)return u}return null},Co.size=function(){var n=0;return Y(this,function(){++n}),n};var qo=[];ao.selection.enter=Z,ao.selection.enter.prototype=qo,qo.append=Co.append,qo.empty=Co.empty,qo.node=Co.node,qo.call=Co.call,qo.size=Co.size,qo.select=function(n){for(var t,e,r,i,u,o=[],a=-1,l=this.length;++ar){if("string"!=typeof n){2>r&&(t=!1);for(e in n)this.each(X(e,n[e],t));return this}if(2>r)return(r=this.node()["__on"+n])&&r._;e=!1}return this.each(X(n,t,e))};var To=ao.map({mouseenter:"mouseover",mouseleave:"mouseout"});fo&&To.forEach(function(n){"on"+n in fo&&To.remove(n)});var Ro,Do=0;ao.mouse=function(n){return J(n,k())};var Po=this.navigator&&/WebKit/.test(this.navigator.userAgent)?-1:0;ao.touch=function(n,t,e){if(arguments.length<3&&(e=t,t=k().changedTouches),t)for(var r,i=0,u=t.length;u>i;++i)if((r=t[i]).identifier===e)return J(n,r)},ao.behavior.drag=function(){function n(){this.on("mousedown.drag",u).on("touchstart.drag",o)}function e(n,t,e,u,o){return function(){function a(){var n,e,r=t(h,v);r&&(n=r[0]-M[0],e=r[1]-M[1],g|=n|e,M=r,p({type:"drag",x:r[0]+c[0],y:r[1]+c[1],dx:n,dy:e}))}function l(){t(h,v)&&(y.on(u+d,null).on(o+d,null),m(g),p({type:"dragend"}))}var c,f=this,s=ao.event.target.correspondingElement||ao.event.target,h=f.parentNode,p=r.of(f,arguments),g=0,v=n(),d=".drag"+(null==v?"":"-"+v),y=ao.select(e(s)).on(u+d,a).on(o+d,l),m=W(s),M=t(h,v);i?(c=i.apply(f,arguments),c=[c.x-M[0],c.y-M[1]]):c=[0,0],p({type:"dragstart"})}}var 
r=N(n,"drag","dragstart","dragend"),i=null,u=e(b,ao.mouse,t,"mousemove","mouseup"),o=e(G,ao.touch,m,"touchmove","touchend");return n.origin=function(t){return arguments.length?(i=t,n):i},ao.rebind(n,r,"on")},ao.touches=function(n,t){return arguments.length<2&&(t=k().touches),t?co(t).map(function(t){var e=J(n,t);return e.identifier=t.identifier,e}):[]};var Uo=1e-6,jo=Uo*Uo,Fo=Math.PI,Ho=2*Fo,Oo=Ho-Uo,Io=Fo/2,Yo=Fo/180,Zo=180/Fo,Vo=Math.SQRT2,Xo=2,$o=4;ao.interpolateZoom=function(n,t){var e,r,i=n[0],u=n[1],o=n[2],a=t[0],l=t[1],c=t[2],f=a-i,s=l-u,h=f*f+s*s;if(jo>h)r=Math.log(c/o)/Vo,e=function(n){return[i+n*f,u+n*s,o*Math.exp(Vo*n*r)]};else{var p=Math.sqrt(h),g=(c*c-o*o+$o*h)/(2*o*Xo*p),v=(c*c-o*o-$o*h)/(2*c*Xo*p),d=Math.log(Math.sqrt(g*g+1)-g),y=Math.log(Math.sqrt(v*v+1)-v);r=(y-d)/Vo,e=function(n){var t=n*r,e=rn(d),a=o/(Xo*p)*(e*un(Vo*t+d)-en(d));return[i+a*f,u+a*s,o*e/rn(Vo*t+d)]}}return e.duration=1e3*r,e},ao.behavior.zoom=function(){function n(n){n.on(L,s).on(Wo+".zoom",p).on("dblclick.zoom",g).on(R,h)}function e(n){return[(n[0]-k.x)/k.k,(n[1]-k.y)/k.k]}function r(n){return[n[0]*k.k+k.x,n[1]*k.k+k.y]}function i(n){k.k=Math.max(A[0],Math.min(A[1],n))}function u(n,t){t=r(t),k.x+=n[0]-t[0],k.y+=n[1]-t[1]}function o(t,e,r,o){t.__chart__={x:k.x,y:k.y,k:k.k},i(Math.pow(2,o)),u(d=e,r),t=ao.select(t),C>0&&(t=t.transition().duration(C)),t.call(n.event)}function a(){b&&b.domain(x.range().map(function(n){return(n-k.x)/k.k}).map(x.invert)),w&&w.domain(_.range().map(function(n){return(n-k.y)/k.k}).map(_.invert))}function l(n){z++||n({type:"zoomstart"})}function c(n){a(),n({type:"zoom",scale:k.k,translate:[k.x,k.y]})}function f(n){--z||(n({type:"zoomend"}),d=null)}function s(){function n(){a=1,u(ao.mouse(i),h),c(o)}function r(){s.on(q,null).on(T,null),p(a),f(o)}var i=this,o=D.of(i,arguments),a=0,s=ao.select(t(i)).on(q,n).on(T,r),h=e(ao.mouse(i)),p=W(i);Il.call(i),l(o)}function h(){function n(){var n=ao.touches(g);return p=k.k,n.forEach(function(n){n.identifier in 
d&&(d[n.identifier]=e(n))}),n}function t(){var t=ao.event.target;ao.select(t).on(x,r).on(b,a),_.push(t);for(var e=ao.event.changedTouches,i=0,u=e.length;u>i;++i)d[e[i].identifier]=null;var l=n(),c=Date.now();if(1===l.length){if(500>c-M){var f=l[0];o(g,f,d[f.identifier],Math.floor(Math.log(k.k)/Math.LN2)+1),S()}M=c}else if(l.length>1){var f=l[0],s=l[1],h=f[0]-s[0],p=f[1]-s[1];y=h*h+p*p}}function r(){var n,t,e,r,o=ao.touches(g);Il.call(g);for(var a=0,l=o.length;l>a;++a,r=null)if(e=o[a],r=d[e.identifier]){if(t)break;n=e,t=r}if(r){var f=(f=e[0]-n[0])*f+(f=e[1]-n[1])*f,s=y&&Math.sqrt(f/y);n=[(n[0]+e[0])/2,(n[1]+e[1])/2],t=[(t[0]+r[0])/2,(t[1]+r[1])/2],i(s*p)}M=null,u(n,t),c(v)}function a(){if(ao.event.touches.length){for(var t=ao.event.changedTouches,e=0,r=t.length;r>e;++e)delete d[t[e].identifier];for(var i in d)return void n()}ao.selectAll(_).on(m,null),w.on(L,s).on(R,h),N(),f(v)}var p,g=this,v=D.of(g,arguments),d={},y=0,m=".zoom-"+ao.event.changedTouches[0].identifier,x="touchmove"+m,b="touchend"+m,_=[],w=ao.select(g),N=W(g);t(),l(v),w.on(L,null).on(R,t)}function p(){var n=D.of(this,arguments);m?clearTimeout(m):(Il.call(this),v=e(d=y||ao.mouse(this)),l(n)),m=setTimeout(function(){m=null,f(n)},50),S(),i(Math.pow(2,.002*Bo())*k.k),u(d,v),c(n)}function g(){var n=ao.mouse(this),t=Math.log(k.k)/Math.LN2;o(this,n,e(n),ao.event.shiftKey?Math.ceil(t)-1:Math.floor(t)+1)}var v,d,y,m,M,x,b,_,w,k={x:0,y:0,k:1},E=[960,500],A=Jo,C=250,z=0,L="mousedown.zoom",q="mousemove.zoom",T="mouseup.zoom",R="touchstart.zoom",D=N(n,"zoomstart","zoom","zoomend");return Wo||(Wo="onwheel"in fo?(Bo=function(){return-ao.event.deltaY*(ao.event.deltaMode?120:1)},"wheel"):"onmousewheel"in fo?(Bo=function(){return ao.event.wheelDelta},"mousewheel"):(Bo=function(){return-ao.event.detail},"MozMousePixelScroll")),n.event=function(n){n.each(function(){var 
n=D.of(this,arguments),t=k;Hl?ao.select(this).transition().each("start.zoom",function(){k=this.__chart__||{x:0,y:0,k:1},l(n)}).tween("zoom:zoom",function(){var e=E[0],r=E[1],i=d?d[0]:e/2,u=d?d[1]:r/2,o=ao.interpolateZoom([(i-k.x)/k.k,(u-k.y)/k.k,e/k.k],[(i-t.x)/t.k,(u-t.y)/t.k,e/t.k]);return function(t){var r=o(t),a=e/r[2];this.__chart__=k={x:i-r[0]*a,y:u-r[1]*a,k:a},c(n)}}).each("interrupt.zoom",function(){f(n)}).each("end.zoom",function(){f(n)}):(this.__chart__=k,l(n),c(n),f(n))})},n.translate=function(t){return arguments.length?(k={x:+t[0],y:+t[1],k:k.k},a(),n):[k.x,k.y]},n.scale=function(t){return arguments.length?(k={x:k.x,y:k.y,k:null},i(+t),a(),n):k.k},n.scaleExtent=function(t){return arguments.length?(A=null==t?Jo:[+t[0],+t[1]],n):A},n.center=function(t){return arguments.length?(y=t&&[+t[0],+t[1]],n):y},n.size=function(t){return arguments.length?(E=t&&[+t[0],+t[1]],n):E},n.duration=function(t){return arguments.length?(C=+t,n):C},n.x=function(t){return arguments.length?(b=t,x=t.copy(),k={x:0,y:0,k:1},n):b},n.y=function(t){return arguments.length?(w=t,_=t.copy(),k={x:0,y:0,k:1},n):w},ao.rebind(n,D,"on")};var Bo,Wo,Jo=[0,1/0];ao.color=an,an.prototype.toString=function(){return this.rgb()+""},ao.hsl=ln;var Go=ln.prototype=new an;Go.brighter=function(n){return n=Math.pow(.7,arguments.length?n:1),new ln(this.h,this.s,this.l/n)},Go.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),new ln(this.h,this.s,n*this.l)},Go.rgb=function(){return cn(this.h,this.s,this.l)},ao.hcl=fn;var Ko=fn.prototype=new an;Ko.brighter=function(n){return new fn(this.h,this.c,Math.min(100,this.l+Qo*(arguments.length?n:1)))},Ko.darker=function(n){return new fn(this.h,this.c,Math.max(0,this.l-Qo*(arguments.length?n:1)))},Ko.rgb=function(){return sn(this.h,this.c,this.l).rgb()},ao.lab=hn;var Qo=18,na=.95047,ta=1,ea=1.08883,ra=hn.prototype=new an;ra.brighter=function(n){return new hn(Math.min(100,this.l+Qo*(arguments.length?n:1)),this.a,this.b)},ra.darker=function(n){return new 
hn(Math.max(0,this.l-Qo*(arguments.length?n:1)),this.a,this.b)},ra.rgb=function(){return pn(this.l,this.a,this.b)},ao.rgb=mn;var ia=mn.prototype=new an;ia.brighter=function(n){n=Math.pow(.7,arguments.length?n:1);var t=this.r,e=this.g,r=this.b,i=30;return t||e||r?(t&&i>t&&(t=i),e&&i>e&&(e=i),r&&i>r&&(r=i),new mn(Math.min(255,t/n),Math.min(255,e/n),Math.min(255,r/n))):new mn(i,i,i)},ia.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),new mn(n*this.r,n*this.g,n*this.b)},ia.hsl=function(){return wn(this.r,this.g,this.b)},ia.toString=function(){return"#"+bn(this.r)+bn(this.g)+bn(this.b)};var ua=ao.map({aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,ligh
tseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,rebeccapurple:6697881,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074});ua.forEach(function(n,t){ua.set(n,Mn(t))}),ao.functor=En,ao.xhr=An(m),ao.dsv=function(n,t){function e(n,e,u){arguments.length<3&&(u=e,e=null);var o=Cn(n,t,null==e?r:i(e),u);return o.row=function(n){return arguments.length?o.response(null==(e=n)?r:i(n)):e},o}function r(n){return e.parse(n.responseText)}function i(n){return function(t){return e.parse(t.responseText,n)}}function u(t){return t.map(o).join(n)}function o(n){return a.test(n)?'"'+n.replace(/\"/g,'""')+'"':n}var a=new RegExp('["'+n+"\n]"),l=n.charCodeAt(0);return e.parse=function(n,t){var r;return e.parseRows(n,function(n,e){if(r)return r(n,e-1);var i=new Function("d","return {"+n.map(function(n,t){return 
JSON.stringify(n)+": d["+t+"]"}).join(",")+"}");r=t?function(n,e){return t(i(n),e)}:i})},e.parseRows=function(n,t){function e(){if(f>=c)return o;if(i)return i=!1,u;var t=f;if(34===n.charCodeAt(t)){for(var e=t;e++f;){var r=n.charCodeAt(f++),a=1;if(10===r)i=!0;else if(13===r)i=!0,10===n.charCodeAt(f)&&(++f,++a);else if(r!==l)continue;return n.slice(t,f-a)}return n.slice(t)}for(var r,i,u={},o={},a=[],c=n.length,f=0,s=0;(r=e())!==o;){for(var h=[];r!==u&&r!==o;)h.push(r),r=e();t&&null==(h=t(h,s++))||a.push(h)}return a},e.format=function(t){if(Array.isArray(t[0]))return e.formatRows(t);var r=new y,i=[];return t.forEach(function(n){for(var t in n)r.has(t)||i.push(r.add(t))}),[i.map(o).join(n)].concat(t.map(function(t){return i.map(function(n){return o(t[n])}).join(n)})).join("\n")},e.formatRows=function(n){return n.map(u).join("\n")},e},ao.csv=ao.dsv(",","text/csv"),ao.tsv=ao.dsv(" ","text/tab-separated-values");var oa,aa,la,ca,fa=this[x(this,"requestAnimationFrame")]||function(n){setTimeout(n,17)};ao.timer=function(){qn.apply(this,arguments)},ao.timer.flush=function(){Rn(),Dn()},ao.round=function(n,t){return t?Math.round(n*(t=Math.pow(10,t)))/t:Math.round(n)};var sa=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"].map(Un);ao.formatPrefix=function(n,t){var e=0;return(n=+n)&&(0>n&&(n*=-1),t&&(n=ao.round(n,Pn(n,t))),e=1+Math.floor(1e-12+Math.log(n)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((e-1)/3)))),sa[8+e/3]};var ha=/(?:([^{])?([<>=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,pa=ao.map({b:function(n){return n.toString(2)},c:function(n){return String.fromCharCode(n)},o:function(n){return n.toString(8)},x:function(n){return n.toString(16)},X:function(n){return n.toString(16).toUpperCase()},g:function(n,t){return n.toPrecision(t)},e:function(n,t){return n.toExponential(t)},f:function(n,t){return 
n.toFixed(t)},r:function(n,t){return(n=ao.round(n,Pn(n,t))).toFixed(Math.max(0,Math.min(20,Pn(n*(1+1e-15),t))))}}),ga=ao.time={},va=Date;Hn.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){da.setUTCDate.apply(this._,arguments)},setDay:function(){da.setUTCDay.apply(this._,arguments)},setFullYear:function(){da.setUTCFullYear.apply(this._,arguments)},setHours:function(){da.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){da.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){da.setUTCMinutes.apply(this._,arguments)},setMonth:function(){da.setUTCMonth.apply(this._,arguments)},setSeconds:function(){da.setUTCSeconds.apply(this._,arguments)},setTime:function(){da.setTime.apply(this._,arguments)}};var da=Date.prototype;ga.year=On(function(n){return n=ga.day(n),n.setMonth(0,1),n},function(n,t){n.setFullYear(n.getFullYear()+t)},function(n){return n.getFullYear()}),ga.years=ga.year.range,ga.years.utc=ga.year.utc.range,ga.day=On(function(n){var t=new va(2e3,0);return t.setFullYear(n.getFullYear(),n.getMonth(),n.getDate()),t},function(n,t){n.setDate(n.getDate()+t)},function(n){return n.getDate()-1}),ga.days=ga.day.range,ga.days.utc=ga.day.utc.range,ga.dayOfYear=function(n){var t=ga.year(n);return Math.floor((n-t-6e4*(n.getTimezoneOffset()-t.getTimezoneOffset()))/864e5)},["sunday","monday","tuesday","wednesday","thursday","friday","saturday"].forEach(function(n,t){t=7-t;var 
e=ga[n]=On(function(n){return(n=ga.day(n)).setDate(n.getDate()-(n.getDay()+t)%7),n},function(n,t){n.setDate(n.getDate()+7*Math.floor(t))},function(n){var e=ga.year(n).getDay();return Math.floor((ga.dayOfYear(n)+(e+t)%7)/7)-(e!==t)});ga[n+"s"]=e.range,ga[n+"s"].utc=e.utc.range,ga[n+"OfYear"]=function(n){var e=ga.year(n).getDay();return Math.floor((ga.dayOfYear(n)+(e+t)%7)/7)}}),ga.week=ga.sunday,ga.weeks=ga.sunday.range,ga.weeks.utc=ga.sunday.utc.range,ga.weekOfYear=ga.sundayOfYear;var ya={"-":"",_:" ",0:"0"},ma=/^\s*\d+/,Ma=/^%/;ao.locale=function(n){return{numberFormat:jn(n),timeFormat:Yn(n)}};var xa=ao.locale({decimal:".",thousands:",",grouping:[3],currency:["$",""],dateTime:"%a %b %e %X %Y",date:"%m/%d/%Y",time:"%H:%M:%S",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"], shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});ao.format=xa.numberFormat,ao.geo={},ft.prototype={s:0,t:0,add:function(n){st(n,this.t,ba),st(ba.s,this.s,this),this.s?this.t+=ba.t:this.s=ba.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var ba=new ft;ao.geo.stream=function(n,t){n&&_a.hasOwnProperty(n.type)?_a[n.type](n,t):ht(n,t)};var _a={Feature:function(n,t){ht(n.geometry,t)},FeatureCollection:function(n,t){for(var e=n.features,r=-1,i=e.length;++rn?4*Fo+n:n,Na.lineStart=Na.lineEnd=Na.point=b}};ao.geo.bounds=function(){function n(n,t){M.push(x=[f=n,h=n]),s>t&&(s=t),t>p&&(p=t)}function t(t,e){var r=dt([t*Yo,e*Yo]);if(y){var i=mt(y,r),u=[i[1],-i[0],0],o=mt(u,i);bt(o),o=_t(o);var l=t-g,c=l>0?1:-1,v=o[0]*Zo*c,d=xo(l)>180;if(d^(v>c*g&&c*t>v)){var m=o[1]*Zo;m>p&&(p=m)}else if(v=(v+360)%360-180,d^(v>c*g&&c*t>v)){var m=-o[1]*Zo;s>m&&(s=m)}else 
s>e&&(s=e),e>p&&(p=e);d?g>t?a(f,t)>a(f,h)&&(h=t):a(t,h)>a(f,h)&&(f=t):h>=f?(f>t&&(f=t),t>h&&(h=t)):t>g?a(f,t)>a(f,h)&&(h=t):a(t,h)>a(f,h)&&(f=t)}else n(t,e);y=r,g=t}function e(){b.point=t}function r(){x[0]=f,x[1]=h,b.point=n,y=null}function i(n,e){if(y){var r=n-g;m+=xo(r)>180?r+(r>0?360:-360):r}else v=n,d=e;Na.point(n,e),t(n,e)}function u(){Na.lineStart()}function o(){i(v,d),Na.lineEnd(),xo(m)>Uo&&(f=-(h=180)),x[0]=f,x[1]=h,y=null}function a(n,t){return(t-=n)<0?t+360:t}function l(n,t){return n[0]-t[0]}function c(n,t){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:nka?(f=-(h=180),s=-(p=90)):m>Uo?p=90:-Uo>m&&(s=-90),x[0]=f,x[1]=h}};return function(n){p=h=-(f=s=1/0),M=[],ao.geo.stream(n,b);var t=M.length;if(t){M.sort(l);for(var e,r=1,i=M[0],u=[i];t>r;++r)e=M[r],c(e[0],i)||c(e[1],i)?(a(i[0],e[1])>a(i[0],i[1])&&(i[1]=e[1]),a(e[0],i[1])>a(i[0],i[1])&&(i[0]=e[0])):u.push(i=e);for(var o,e,g=-(1/0),t=u.length-1,r=0,i=u[t];t>=r;i=e,++r)e=u[r],(o=a(i[1],e[0]))>g&&(g=o,f=e[0],h=i[1])}return M=x=null,f===1/0||s===1/0?[[NaN,NaN],[NaN,NaN]]:[[f,s],[h,p]]}}(),ao.geo.centroid=function(n){Ea=Aa=Ca=za=La=qa=Ta=Ra=Da=Pa=Ua=0,ao.geo.stream(n,ja);var t=Da,e=Pa,r=Ua,i=t*t+e*e+r*r;return jo>i&&(t=qa,e=Ta,r=Ra,Uo>Aa&&(t=Ca,e=za,r=La),i=t*t+e*e+r*r,jo>i)?[NaN,NaN]:[Math.atan2(e,t)*Zo,tn(r/Math.sqrt(i))*Zo]};var Ea,Aa,Ca,za,La,qa,Ta,Ra,Da,Pa,Ua,ja={sphere:b,point:St,lineStart:Nt,lineEnd:Et,polygonStart:function(){ja.lineStart=At},polygonEnd:function(){ja.lineStart=Nt}},Fa=Rt(zt,jt,Ht,[-Fo,-Fo/2]),Ha=1e9;ao.geo.clipExtent=function(){var n,t,e,r,i,u,o={stream:function(n){return i&&(i.valid=!1),i=u(n),i.valid=!0,i},extent:function(a){return arguments.length?(u=Zt(n=+a[0][0],t=+a[0][1],e=+a[1][0],r=+a[1][1]),i&&(i.valid=!1,i=null),o):[[n,t],[e,r]]}};return o.extent([[0,0],[960,500]])},(ao.geo.conicEqualArea=function(){return Vt(Xt)}).raw=Xt,ao.geo.albers=function(){return ao.geo.conicEqualArea().rotate([96,0]).center([-.6,38.7]).parallels([29.5,45.5]).scale(1070)},ao.geo.albersUsa=function(){function 
n(n){var u=n[0],o=n[1];return t=null,e(u,o),t||(r(u,o),t)||i(u,o),t}var t,e,r,i,u=ao.geo.albers(),o=ao.geo.conicEqualArea().rotate([154,0]).center([-2,58.5]).parallels([55,65]),a=ao.geo.conicEqualArea().rotate([157,0]).center([-3,19.9]).parallels([8,18]),l={point:function(n,e){t=[n,e]}};return n.invert=function(n){var t=u.scale(),e=u.translate(),r=(n[0]-e[0])/t,i=(n[1]-e[1])/t;return(i>=.12&&.234>i&&r>=-.425&&-.214>r?o:i>=.166&&.234>i&&r>=-.214&&-.115>r?a:u).invert(n)},n.stream=function(n){var t=u.stream(n),e=o.stream(n),r=a.stream(n);return{point:function(n,i){t.point(n,i),e.point(n,i),r.point(n,i)},sphere:function(){t.sphere(),e.sphere(),r.sphere()},lineStart:function(){t.lineStart(),e.lineStart(),r.lineStart()},lineEnd:function(){t.lineEnd(),e.lineEnd(),r.lineEnd()},polygonStart:function(){t.polygonStart(),e.polygonStart(),r.polygonStart()},polygonEnd:function(){t.polygonEnd(),e.polygonEnd(),r.polygonEnd()}}},n.precision=function(t){return arguments.length?(u.precision(t),o.precision(t),a.precision(t),n):u.precision()},n.scale=function(t){return arguments.length?(u.scale(t),o.scale(.35*t),a.scale(t),n.translate(u.translate())):u.scale()},n.translate=function(t){if(!arguments.length)return u.translate();var c=u.scale(),f=+t[0],s=+t[1];return e=u.translate(t).clipExtent([[f-.455*c,s-.238*c],[f+.455*c,s+.238*c]]).stream(l).point,r=o.translate([f-.307*c,s+.201*c]).clipExtent([[f-.425*c+Uo,s+.12*c+Uo],[f-.214*c-Uo,s+.234*c-Uo]]).stream(l).point,i=a.translate([f-.205*c,s+.212*c]).clipExtent([[f-.214*c+Uo,s+.166*c+Uo],[f-.115*c-Uo,s+.234*c-Uo]]).stream(l).point,n},n.scale(1070)};var 
Oa,Ia,Ya,Za,Va,Xa,$a={point:b,lineStart:b,lineEnd:b,polygonStart:function(){Ia=0,$a.lineStart=$t},polygonEnd:function(){$a.lineStart=$a.lineEnd=$a.point=b,Oa+=xo(Ia/2)}},Ba={point:Bt,lineStart:b,lineEnd:b,polygonStart:b,polygonEnd:b},Wa={point:Gt,lineStart:Kt,lineEnd:Qt,polygonStart:function(){Wa.lineStart=ne},polygonEnd:function(){Wa.point=Gt,Wa.lineStart=Kt,Wa.lineEnd=Qt}};ao.geo.path=function(){function n(n){return n&&("function"==typeof a&&u.pointRadius(+a.apply(this,arguments)),o&&o.valid||(o=i(u)),ao.geo.stream(n,o)),u.result()}function t(){return o=null,n}var e,r,i,u,o,a=4.5;return n.area=function(n){return Oa=0,ao.geo.stream(n,i($a)),Oa},n.centroid=function(n){return Ca=za=La=qa=Ta=Ra=Da=Pa=Ua=0,ao.geo.stream(n,i(Wa)),Ua?[Da/Ua,Pa/Ua]:Ra?[qa/Ra,Ta/Ra]:La?[Ca/La,za/La]:[NaN,NaN]},n.bounds=function(n){return Va=Xa=-(Ya=Za=1/0),ao.geo.stream(n,i(Ba)),[[Ya,Za],[Va,Xa]]},n.projection=function(n){return arguments.length?(i=(e=n)?n.stream||re(n):m,t()):e},n.context=function(n){return arguments.length?(u=null==(r=n)?new Wt:new te(n),"function"!=typeof a&&u.pointRadius(a),t()):r},n.pointRadius=function(t){return arguments.length?(a="function"==typeof t?t:(u.pointRadius(+t),+t),n):a},n.projection(ao.geo.albersUsa()).context(null)},ao.geo.transform=function(n){return{stream:function(t){var e=new ie(t);for(var r in n)e[r]=n[r];return e}}},ie.prototype={point:function(n,t){this.stream.point(n,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},ao.geo.projection=oe,ao.geo.projectionMutator=ae,(ao.geo.equirectangular=function(){return oe(ce)}).raw=ce.invert=ce,ao.geo.rotation=function(n){function t(t){return t=n(t[0]*Yo,t[1]*Yo),t[0]*=Zo,t[1]*=Zo,t}return n=se(n[0]%360*Yo,n[1]*Yo,n.length>2?n[2]*Yo:0),t.invert=function(t){return 
t=n.invert(t[0]*Yo,t[1]*Yo),t[0]*=Zo,t[1]*=Zo,t},t},fe.invert=ce,ao.geo.circle=function(){function n(){var n="function"==typeof r?r.apply(this,arguments):r,t=se(-n[0]*Yo,-n[1]*Yo,0).invert,i=[];return e(null,null,1,{point:function(n,e){i.push(n=t(n,e)),n[0]*=Zo,n[1]*=Zo}}),{type:"Polygon",coordinates:[i]}}var t,e,r=[0,0],i=6;return n.origin=function(t){return arguments.length?(r=t,n):r},n.angle=function(r){return arguments.length?(e=ve((t=+r)*Yo,i*Yo),n):t},n.precision=function(r){return arguments.length?(e=ve(t*Yo,(i=+r)*Yo),n):i},n.angle(90)},ao.geo.distance=function(n,t){var e,r=(t[0]-n[0])*Yo,i=n[1]*Yo,u=t[1]*Yo,o=Math.sin(r),a=Math.cos(r),l=Math.sin(i),c=Math.cos(i),f=Math.sin(u),s=Math.cos(u);return Math.atan2(Math.sqrt((e=s*o)*e+(e=c*f-l*s*a)*e),l*f+c*s*a)},ao.geo.graticule=function(){function n(){return{type:"MultiLineString",coordinates:t()}}function t(){return ao.range(Math.ceil(u/d)*d,i,d).map(h).concat(ao.range(Math.ceil(c/y)*y,l,y).map(p)).concat(ao.range(Math.ceil(r/g)*g,e,g).filter(function(n){return xo(n%d)>Uo}).map(f)).concat(ao.range(Math.ceil(a/v)*v,o,v).filter(function(n){return xo(n%y)>Uo}).map(s))}var e,r,i,u,o,a,l,c,f,s,h,p,g=10,v=g,d=90,y=360,m=2.5;return n.lines=function(){return t().map(function(n){return{type:"LineString",coordinates:n}})},n.outline=function(){return{type:"Polygon",coordinates:[h(u).concat(p(l).slice(1),h(i).reverse().slice(1),p(c).reverse().slice(1))]}},n.extent=function(t){return arguments.length?n.majorExtent(t).minorExtent(t):n.minorExtent()},n.majorExtent=function(t){return arguments.length?(u=+t[0][0],i=+t[1][0],c=+t[0][1],l=+t[1][1],u>i&&(t=u,u=i,i=t),c>l&&(t=c,c=l,l=t),n.precision(m)):[[u,c],[i,l]]},n.minorExtent=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],a=+t[0][1],o=+t[1][1],r>e&&(t=r,r=e,e=t),a>o&&(t=a,a=o,o=t),n.precision(m)):[[r,a],[e,o]]},n.step=function(t){return arguments.length?n.majorStep(t).minorStep(t):n.minorStep()},n.majorStep=function(t){return 
arguments.length?(d=+t[0],y=+t[1],n):[d,y]},n.minorStep=function(t){return arguments.length?(g=+t[0],v=+t[1],n):[g,v]},n.precision=function(t){return arguments.length?(m=+t,f=ye(a,o,90),s=me(r,e,m),h=ye(c,l,90),p=me(u,i,m),n):m},n.majorExtent([[-180,-90+Uo],[180,90-Uo]]).minorExtent([[-180,-80-Uo],[180,80+Uo]])},ao.geo.greatArc=function(){function n(){return{type:"LineString",coordinates:[t||r.apply(this,arguments),e||i.apply(this,arguments)]}}var t,e,r=Me,i=xe;return n.distance=function(){return ao.geo.distance(t||r.apply(this,arguments),e||i.apply(this,arguments))},n.source=function(e){return arguments.length?(r=e,t="function"==typeof e?null:e,n):r},n.target=function(t){return arguments.length?(i=t,e="function"==typeof t?null:t,n):i},n.precision=function(){return arguments.length?n:0},n},ao.geo.interpolate=function(n,t){return be(n[0]*Yo,n[1]*Yo,t[0]*Yo,t[1]*Yo)},ao.geo.length=function(n){return Ja=0,ao.geo.stream(n,Ga),Ja};var Ja,Ga={sphere:b,point:b,lineStart:_e,lineEnd:b,polygonStart:b,polygonEnd:b},Ka=we(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});(ao.geo.azimuthalEqualArea=function(){return oe(Ka)}).raw=Ka;var Qa=we(function(n){var t=Math.acos(n);return t&&t/Math.sin(t)},m);(ao.geo.azimuthalEquidistant=function(){return oe(Qa)}).raw=Qa,(ao.geo.conicConformal=function(){return Vt(Se)}).raw=Se,(ao.geo.conicEquidistant=function(){return Vt(ke)}).raw=ke;var nl=we(function(n){return 1/n},Math.atan);(ao.geo.gnomonic=function(){return oe(nl)}).raw=nl,Ne.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Io]},(ao.geo.mercator=function(){return Ee(Ne)}).raw=Ne;var tl=we(function(){return 1},Math.asin);(ao.geo.orthographic=function(){return oe(tl)}).raw=tl;var el=we(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});(ao.geo.stereographic=function(){return oe(el)}).raw=el,Ae.invert=function(n,t){return[-t,2*Math.atan(Math.exp(n))-Io]},(ao.geo.transverseMercator=function(){var n=Ee(Ae),t=n.center,e=n.rotate;return 
n.center=function(n){return n?t([-n[1],n[0]]):(n=t(),[n[1],-n[0]])},n.rotate=function(n){return n?e([n[0],n[1],n.length>2?n[2]+90:90]):(n=e(),[n[0],n[1],n[2]-90])},e([0,0,90])}).raw=Ae,ao.geom={},ao.geom.hull=function(n){function t(n){if(n.length<3)return[];var t,i=En(e),u=En(r),o=n.length,a=[],l=[];for(t=0;o>t;t++)a.push([+i.call(this,n[t],t),+u.call(this,n[t],t),t]);for(a.sort(qe),t=0;o>t;t++)l.push([a[t][0],-a[t][1]]);var c=Le(a),f=Le(l),s=f[0]===c[0],h=f[f.length-1]===c[c.length-1],p=[];for(t=c.length-1;t>=0;--t)p.push(n[a[c[t]][2]]);for(t=+s;t=r&&c.x<=u&&c.y>=i&&c.y<=o?[[r,o],[u,o],[u,i],[r,i]]:[];f.point=n[a]}),t}function e(n){return n.map(function(n,t){return{x:Math.round(u(n,t)/Uo)*Uo,y:Math.round(o(n,t)/Uo)*Uo,i:t}})}var r=Ce,i=ze,u=r,o=i,a=sl;return n?t(n):(t.links=function(n){return ar(e(n)).edges.filter(function(n){return n.l&&n.r}).map(function(t){return{source:n[t.l.i],target:n[t.r.i]}})},t.triangles=function(n){var t=[];return ar(e(n)).cells.forEach(function(e,r){for(var i,u,o=e.site,a=e.edges.sort(Ve),l=-1,c=a.length,f=a[c-1].edge,s=f.l===o?f.r:f.l;++l=c,h=r>=f,p=h<<1|s;n.leaf=!1,n=n.nodes[p]||(n.nodes[p]=hr()),s?i=c:a=c,h?o=f:l=f,u(n,t,e,r,i,o,a,l)}var f,s,h,p,g,v,d,y,m,M=En(a),x=En(l);if(null!=t)v=t,d=e,y=r,m=i;else if(y=m=-(v=d=1/0),s=[],h=[],g=n.length,o)for(p=0;g>p;++p)f=n[p],f.xy&&(y=f.x),f.y>m&&(m=f.y),s.push(f.x),h.push(f.y);else for(p=0;g>p;++p){var b=+M(f=n[p],p),_=+x(f,p);v>b&&(v=b),d>_&&(d=_),b>y&&(y=b),_>m&&(m=_),s.push(b),h.push(_)}var w=y-v,S=m-d;w>S?m=d+w:y=v+S;var k=hr();if(k.add=function(n){u(k,n,+M(n,++p),+x(n,p),v,d,y,m)},k.visit=function(n){pr(n,k,v,d,y,m)},k.find=function(n){return gr(k,n[0],n[1],v,d,y,m)},p=-1,null==t){for(;++p=0?n.slice(0,t):n,r=t>=0?n.slice(t+1):"in";return e=vl.get(e)||gl,r=dl.get(r)||m,br(r(e.apply(null,lo.call(arguments,1))))},ao.interpolateHcl=Rr,ao.interpolateHsl=Dr,ao.interpolateLab=Pr,ao.interpolateRound=Ur,ao.transform=function(n){var 
t=fo.createElementNS(ao.ns.prefix.svg,"g");return(ao.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new jr(e?e.matrix:yl)})(n)},jr.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var yl={a:1,b:0,c:0,d:1,e:0,f:0};ao.interpolateTransform=$r,ao.layout={},ao.layout.bundle=function(){return function(n){for(var t=[],e=-1,r=n.length;++ea*a/y){if(v>l){var c=t.charge/l;n.px-=u*c,n.py-=o*c}return!0}if(t.point&&l&&v>l){var c=t.pointCharge/l;n.px-=u*c,n.py-=o*c}}return!t.charge}}function t(n){n.px=ao.event.x,n.py=ao.event.y,l.resume()}var e,r,i,u,o,a,l={},c=ao.dispatch("start","tick","end"),f=[1,1],s=.9,h=ml,p=Ml,g=-30,v=xl,d=.1,y=.64,M=[],x=[];return l.tick=function(){if((i*=.99)<.005)return e=null,c.end({type:"end",alpha:i=0}),!0;var t,r,l,h,p,v,y,m,b,_=M.length,w=x.length;for(r=0;w>r;++r)l=x[r],h=l.source,p=l.target,m=p.x-h.x,b=p.y-h.y,(v=m*m+b*b)&&(v=i*o[r]*((v=Math.sqrt(v))-u[r])/v,m*=v,b*=v,p.x-=m*(y=h.weight+p.weight?h.weight/(h.weight+p.weight):.5),p.y-=b*y,h.x+=m*(y=1-y),h.y+=b*y);if((y=i*d)&&(m=f[0]/2,b=f[1]/2,r=-1,y))for(;++r<_;)l=M[r],l.x+=(m-l.x)*y,l.y+=(b-l.y)*y;if(g)for(ri(t=ao.geom.quadtree(M),i,a),r=-1;++r<_;)(l=M[r]).fixed||t.visit(n(l));for(r=-1;++r<_;)l=M[r],l.fixed?(l.x=l.px,l.y=l.py):(l.x-=(l.px-(l.px=l.x))*s,l.y-=(l.py-(l.py=l.y))*s);c.tick({type:"tick",alpha:i})},l.nodes=function(n){return arguments.length?(M=n,l):M},l.links=function(n){return arguments.length?(x=n,l):x},l.size=function(n){return arguments.length?(f=n,l):f},l.linkDistance=function(n){return arguments.length?(h="function"==typeof n?n:+n,l):h},l.distance=l.linkDistance,l.linkStrength=function(n){return arguments.length?(p="function"==typeof n?n:+n,l):p},l.friction=function(n){return arguments.length?(s=+n,l):s},l.charge=function(n){return arguments.length?(g="function"==typeof n?n:+n,l):g},l.chargeDistance=function(n){return 
arguments.length?(v=n*n,l):Math.sqrt(v)},l.gravity=function(n){return arguments.length?(d=+n,l):d},l.theta=function(n){return arguments.length?(y=n*n,l):Math.sqrt(y)},l.alpha=function(n){return arguments.length?(n=+n,i?n>0?i=n:(e.c=null,e.t=NaN,e=null,c.end({type:"end",alpha:i=0})):n>0&&(c.start({type:"start",alpha:i=n}),e=qn(l.tick)),l):i},l.start=function(){function n(n,r){if(!e){for(e=new Array(i),l=0;i>l;++l)e[l]=[];for(l=0;c>l;++l){var u=x[l];e[u.source.index].push(u.target),e[u.target.index].push(u.source)}}for(var o,a=e[t],l=-1,f=a.length;++lt;++t)(r=M[t]).index=t,r.weight=0;for(t=0;c>t;++t)r=x[t],"number"==typeof r.source&&(r.source=M[r.source]),"number"==typeof r.target&&(r.target=M[r.target]),++r.source.weight,++r.target.weight;for(t=0;i>t;++t)r=M[t],isNaN(r.x)&&(r.x=n("x",s)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof h)for(t=0;c>t;++t)u[t]=+h.call(this,x[t],t);else for(t=0;c>t;++t)u[t]=h;if(o=[],"function"==typeof p)for(t=0;c>t;++t)o[t]=+p.call(this,x[t],t);else for(t=0;c>t;++t)o[t]=p;if(a=[],"function"==typeof g)for(t=0;i>t;++t)a[t]=+g.call(this,M[t],t);else for(t=0;i>t;++t)a[t]=g;return l.resume()},l.resume=function(){return l.alpha(.1)},l.stop=function(){return l.alpha(0)},l.drag=function(){return r||(r=ao.behavior.drag().origin(m).on("dragstart.force",Qr).on("drag.force",t).on("dragend.force",ni)),arguments.length?void this.on("mouseover.force",ti).on("mouseout.force",ei).call(r):r},ao.rebind(l,c,"on")};var ml=20,Ml=1,xl=1/0;ao.layout.hierarchy=function(){function n(i){var u,o=[i],a=[];for(i.depth=0;null!=(u=o.pop());)if(a.push(u),(c=e.call(n,u,u.depth))&&(l=c.length)){for(var l,c,f;--l>=0;)o.push(f=c[l]),f.parent=u,f.depth=u.depth+1;r&&(u.value=0),u.children=c}else r&&(u.value=+r.call(n,u,u.depth)||0),delete u.children;return oi(i,function(n){var e,i;t&&(e=n.children)&&e.sort(t),r&&(i=n.parent)&&(i.value+=n.value)}),a}var t=ci,e=ai,r=li;return n.sort=function(e){return 
arguments.length?(t=e,n):t},n.children=function(t){return arguments.length?(e=t,n):e},n.value=function(t){return arguments.length?(r=t,n):r},n.revalue=function(t){return r&&(ui(t,function(n){n.children&&(n.value=0)}),oi(t,function(t){var e;t.children||(t.value=+r.call(n,t,t.depth)||0),(e=t.parent)&&(e.value+=t.value)})),t},n},ao.layout.partition=function(){function n(t,e,r,i){var u=t.children;if(t.x=e,t.y=t.depth*i,t.dx=r,t.dy=i,u&&(o=u.length)){var o,a,l,c=-1;for(r=t.value?r/t.value:0;++cs?-1:1),g=ao.sum(c),v=g?(s-l*p)/g:0,d=ao.range(l),y=[];return null!=e&&d.sort(e===bl?function(n,t){return c[t]-c[n]}:function(n,t){return e(o[n],o[t])}),d.forEach(function(n){y[n]={data:o[n],value:a=c[n],startAngle:f,endAngle:f+=a*v+p,padAngle:h}}),y}var t=Number,e=bl,r=0,i=Ho,u=0;return n.value=function(e){return arguments.length?(t=e,n):t},n.sort=function(t){return arguments.length?(e=t,n):e},n.startAngle=function(t){return arguments.length?(r=t,n):r},n.endAngle=function(t){return arguments.length?(i=t,n):i},n.padAngle=function(t){return arguments.length?(u=t,n):u},n};var bl={};ao.layout.stack=function(){function n(a,l){if(!(h=a.length))return a;var c=a.map(function(e,r){return t.call(n,e,r)}),f=c.map(function(t){return t.map(function(t,e){return[u.call(n,t,e),o.call(n,t,e)]})}),s=e.call(n,f,l);c=ao.permute(c,s),f=ao.permute(f,s);var h,p,g,v,d=r.call(n,f,l),y=c[0].length;for(g=0;y>g;++g)for(i.call(n,c[0][g],v=d[g],f[0][g][1]),p=1;h>p;++p)i.call(n,c[p][g],v+=f[p-1][g][1],f[p][g][1]);return a}var t=m,e=gi,r=vi,i=pi,u=si,o=hi;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:_l.get(t)||gi,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:wl.get(t)||vi,n):r},n.x=function(t){return arguments.length?(u=t,n):u},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(i=t,n):i},n};var _l=ao.map({"inside-out":function(n){var 
t,e,r=n.length,i=n.map(di),u=n.map(yi),o=ao.range(r).sort(function(n,t){return i[n]-i[t]}),a=0,l=0,c=[],f=[];for(t=0;r>t;++t)e=o[t],l>a?(a+=u[e],c.push(e)):(l+=u[e],f.push(e));return f.reverse().concat(c)},reverse:function(n){return ao.range(n.length).reverse()},"default":gi}),wl=ao.map({silhouette:function(n){var t,e,r,i=n.length,u=n[0].length,o=[],a=0,l=[];for(e=0;u>e;++e){for(t=0,r=0;i>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;u>e;++e)l[e]=(a-o[e])/2;return l},wiggle:function(n){var t,e,r,i,u,o,a,l,c,f=n.length,s=n[0],h=s.length,p=[];for(p[0]=l=c=0,e=1;h>e;++e){for(t=0,i=0;f>t;++t)i+=n[t][e][1];for(t=0,u=0,a=s[e][0]-s[e-1][0];f>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;u+=o*n[t][e][1]}p[e]=l-=i?u/i*a:0,c>l&&(c=l)}for(e=0;h>e;++e)p[e]-=c;return p},expand:function(n){var t,e,r,i=n.length,u=n[0].length,o=1/i,a=[];for(e=0;u>e;++e){for(t=0,r=0;i>t;t++)r+=n[t][e][1];if(r)for(t=0;i>t;t++)n[t][e][1]/=r;else for(t=0;i>t;t++)n[t][e][1]=o}for(e=0;u>e;++e)a[e]=0;return a},zero:vi});ao.layout.histogram=function(){function n(n,u){for(var o,a,l=[],c=n.map(e,this),f=r.call(this,c,u),s=i.call(this,f,c,u),u=-1,h=c.length,p=s.length-1,g=t?1:1/h;++u0)for(u=-1;++u=f[0]&&a<=f[1]&&(o=l[ao.bisect(s,a,1,p)-1],o.y+=g,o.push(n[u]));return l}var t=!0,e=Number,r=bi,i=Mi;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=En(t),n):r},n.bins=function(t){return arguments.length?(i="number"==typeof t?function(n){return xi(n,t)}:En(t),n):i},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},ao.layout.pack=function(){function n(n,u){var o=e.call(this,n,u),a=o[0],l=i[0],c=i[1],f=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,oi(a,function(n){n.r=+f(n.value)}),oi(a,Ni),r){var s=r*(t?1:Math.max(2*a.r/l,2*a.r/c))/2;oi(a,function(n){n.r+=s}),oi(a,Ni),oi(a,function(n){n.r-=s})}return Ci(a,l/2,c/2,t?1:1/Math.max(2*a.r/l,2*a.r/c)),o}var 
t,e=ao.layout.hierarchy().sort(_i),r=0,i=[1,1];return n.size=function(t){return arguments.length?(i=t,n):i},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},ii(n,e)},ao.layout.tree=function(){function n(n,i){var f=o.call(this,n,i),s=f[0],h=t(s);if(oi(h,e),h.parent.m=-h.z,ui(h,r),c)ui(s,u);else{var p=s,g=s,v=s;ui(s,function(n){n.xg.x&&(g=n),n.depth>v.depth&&(v=n)});var d=a(p,g)/2-p.x,y=l[0]/(g.x+a(g,p)/2+d),m=l[1]/(v.depth||1);ui(s,function(n){n.x=(n.x+d)*y,n.y=n.depth*m})}return f}function t(n){for(var t,e={A:null,children:[n]},r=[e];null!=(t=r.pop());)for(var i,u=t.children,o=0,a=u.length;a>o;++o)r.push((u[o]=i={_:u[o],parent:t,children:(i=u[o].children)&&i.slice()||[],A:null,a:null,z:0,m:0,c:0,s:0,t:null,i:o}).a=i);return e.children[0]}function e(n){var t=n.children,e=n.parent.children,r=n.i?e[n.i-1]:null;if(t.length){Di(n);var u=(t[0].z+t[t.length-1].z)/2;r?(n.z=r.z+a(n._,r._),n.m=n.z-u):n.z=u}else r&&(n.z=r.z+a(n._,r._));n.parent.A=i(n,r,n.parent.A||e[0])}function r(n){n._.x=n.z+n.parent.m,n.m+=n.parent.m}function i(n,t,e){if(t){for(var r,i=n,u=n,o=t,l=i.parent.children[0],c=i.m,f=u.m,s=o.m,h=l.m;o=Ti(o),i=qi(i),o&&i;)l=qi(l),u=Ti(u),u.a=n,r=o.z+s-i.z-c+a(o._,i._),r>0&&(Ri(Pi(o,n,e),n,r),c+=r,f+=r),s+=o.m,c+=i.m,h+=l.m,f+=u.m;o&&!Ti(u)&&(u.t=o,u.m+=s-f),i&&!qi(l)&&(l.t=i,l.m+=c-h,e=n)}return e}function u(n){n.x*=l[0],n.y=n.depth*l[1]}var o=ao.layout.hierarchy().sort(null).value(null),a=Li,l=[1,1],c=null;return n.separation=function(t){return arguments.length?(a=t,n):a},n.size=function(t){return arguments.length?(c=null==(l=t)?u:null,n):c?null:l},n.nodeSize=function(t){return arguments.length?(c=null==(l=t)?null:u,n):c?l:null},ii(n,o)},ao.layout.cluster=function(){function n(n,u){var o,a=t.call(this,n,u),l=a[0],c=0;oi(l,function(n){var t=n.children;t&&t.length?(n.x=ji(t),n.y=Ui(t)):(n.x=o?c+=e(n,o):0,n.y=0,o=n)});var 
f=Fi(l),s=Hi(l),h=f.x-e(f,s)/2,p=s.x+e(s,f)/2;return oi(l,i?function(n){n.x=(n.x-l.x)*r[0],n.y=(l.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(p-h)*r[0],n.y=(1-(l.y?n.y/l.y:1))*r[1]}),a}var t=ao.layout.hierarchy().sort(null).value(null),e=Li,r=[1,1],i=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(i=null==(r=t),n):i?null:r},n.nodeSize=function(t){return arguments.length?(i=null!=(r=t),n):i?r:null},ii(n,t)},ao.layout.treemap=function(){function n(n,t){for(var e,r,i=-1,u=n.length;++it?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var u=e.children;if(u&&u.length){var o,a,l,c=s(e),f=[],h=u.slice(),g=1/0,v="slice"===p?c.dx:"dice"===p?c.dy:"slice-dice"===p?1&e.depth?c.dy:c.dx:Math.min(c.dx,c.dy);for(n(h,c.dx*c.dy/e.value),f.area=0;(l=h.length)>0;)f.push(o=h[l-1]),f.area+=o.area,"squarify"!==p||(a=r(f,v))<=g?(h.pop(),g=a):(f.area-=f.pop().area,i(f,v,c,!1),v=Math.min(c.dx,c.dy),f.length=f.area=0,g=1/0);f.length&&(i(f,v,c,!0),f.length=f.area=0),u.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var u,o=s(t),a=r.slice(),l=[];for(n(a,o.dx*o.dy/t.value),l.area=0;u=a.pop();)l.push(u),l.area+=u.area,null!=u.z&&(i(l,u.z?o.dx:o.dy,o,!a.length),l.length=l.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,i=0,u=1/0,o=-1,a=n.length;++oe&&(u=e),e>i&&(i=e));return r*=r,t*=t,r?Math.max(t*i*g/r,r/(t*u*g)):1/0}function i(n,t,e,r){var i,u=-1,o=n.length,a=e.x,c=e.y,f=t?l(n.area/t):0; if(t==e.dx){for((r||f>e.dy)&&(f=e.dy);++ue.dx)&&(f=e.dx);++ue&&(t=1),1>e&&(n=0),function(){var e,r,i;do e=2*Math.random()-1,r=2*Math.random()-1,i=e*e+r*r;while(!i||i>1);return n+t*e*Math.sqrt(-2*Math.log(i)/i)}},logNormal:function(){var n=ao.random.normal.apply(ao,arguments);return function(){return Math.exp(n())}},bates:function(n){var t=ao.random.irwinHall(n);return function(){return t()/n}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t}}},ao.scale={};var 
Sl={floor:m,ceil:m};ao.scale.linear=function(){return Wi([0,1],[0,1],Mr,!1)};var kl={s:1,g:1,p:1,r:1,e:1};ao.scale.log=function(){return ru(ao.scale.linear().domain([0,1]),10,!0,[1,10])};var Nl=ao.format(".0e"),El={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};ao.scale.pow=function(){return iu(ao.scale.linear(),1,[0,1])},ao.scale.sqrt=function(){return ao.scale.pow().exponent(.5)},ao.scale.ordinal=function(){return ou([],{t:"range",a:[[]]})},ao.scale.category10=function(){return ao.scale.ordinal().range(Al)},ao.scale.category20=function(){return ao.scale.ordinal().range(Cl)},ao.scale.category20b=function(){return ao.scale.ordinal().range(zl)},ao.scale.category20c=function(){return ao.scale.ordinal().range(Ll)};var Al=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(xn),Cl=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(xn),zl=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(xn),Ll=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(xn);ao.scale.quantile=function(){return au([],[])},ao.scale.quantize=function(){return lu(0,1,[0,1])},ao.scale.threshold=function(){return cu([.5],[0,1])},ao.scale.identity=function(){return fu([0,1])},ao.svg={},ao.svg.arc=function(){function n(){var n=Math.max(0,+e.apply(this,arguments)),c=Math.max(0,+r.apply(this,arguments)),f=o.apply(this,arguments)-Io,s=a.apply(this,arguments)-Io,h=Math.abs(s-f),p=f>s?0:1;if(n>c&&(g=c,c=n,n=g),h>=Oo)return t(c,p)+(n?t(n,1-p):"")+"Z";var 
g,v,d,y,m,M,x,b,_,w,S,k,N=0,E=0,A=[];if((y=(+l.apply(this,arguments)||0)/2)&&(d=u===ql?Math.sqrt(n*n+c*c):+u.apply(this,arguments),p||(E*=-1),c&&(E=tn(d/c*Math.sin(y))),n&&(N=tn(d/n*Math.sin(y)))),c){m=c*Math.cos(f+E),M=c*Math.sin(f+E),x=c*Math.cos(s-E),b=c*Math.sin(s-E);var C=Math.abs(s-f-2*E)<=Fo?0:1;if(E&&yu(m,M,x,b)===p^C){var z=(f+s)/2;m=c*Math.cos(z),M=c*Math.sin(z),x=b=null}}else m=M=0;if(n){_=n*Math.cos(s-N),w=n*Math.sin(s-N),S=n*Math.cos(f+N),k=n*Math.sin(f+N);var L=Math.abs(f-s+2*N)<=Fo?0:1;if(N&&yu(_,w,S,k)===1-p^L){var q=(f+s)/2;_=n*Math.cos(q),w=n*Math.sin(q),S=k=null}}else _=w=0;if(h>Uo&&(g=Math.min(Math.abs(c-n)/2,+i.apply(this,arguments)))>.001){v=c>n^p?0:1;var T=g,R=g;if(Fo>h){var D=null==S?[_,w]:null==x?[m,M]:Re([m,M],[S,k],[x,b],[_,w]),P=m-D[0],U=M-D[1],j=x-D[0],F=b-D[1],H=1/Math.sin(Math.acos((P*j+U*F)/(Math.sqrt(P*P+U*U)*Math.sqrt(j*j+F*F)))/2),O=Math.sqrt(D[0]*D[0]+D[1]*D[1]);R=Math.min(g,(n-O)/(H-1)),T=Math.min(g,(c-O)/(H+1))}if(null!=x){var I=mu(null==S?[_,w]:[S,k],[m,M],c,T,p),Y=mu([x,b],[_,w],c,T,p);g===T?A.push("M",I[0],"A",T,",",T," 0 0,",v," ",I[1],"A",c,",",c," 0 ",1-p^yu(I[1][0],I[1][1],Y[1][0],Y[1][1]),",",p," ",Y[1],"A",T,",",T," 0 0,",v," ",Y[0]):A.push("M",I[0],"A",T,",",T," 0 1,",v," ",Y[0])}else A.push("M",m,",",M);if(null!=S){var Z=mu([m,M],[S,k],n,-R,p),V=mu([_,w],null==x?[m,M]:[x,b],n,-R,p);g===R?A.push("L",V[0],"A",R,",",R," 0 0,",v," ",V[1],"A",n,",",n," 0 ",p^yu(V[1][0],V[1][1],Z[1][0],Z[1][1]),",",1-p," ",Z[1],"A",R,",",R," 0 0,",v," ",Z[0]):A.push("L",V[0],"A",R,",",R," 0 0,",v," ",Z[0])}else A.push("L",_,",",w)}else A.push("M",m,",",M),null!=x&&A.push("A",c,",",c," 0 ",C,",",p," ",x,",",b),A.push("L",_,",",w),null!=S&&A.push("A",n,",",n," 0 ",L,",",1-p," ",S,",",k);return A.push("Z"),A.join("")}function t(n,t){return"M0,"+n+"A"+n+","+n+" 0 1,"+t+" 0,"+-n+"A"+n+","+n+" 0 1,"+t+" 0,"+n}var e=hu,r=pu,i=su,u=ql,o=gu,a=vu,l=du;return n.innerRadius=function(t){return 
arguments.length?(e=En(t),n):e},n.outerRadius=function(t){return arguments.length?(r=En(t),n):r},n.cornerRadius=function(t){return arguments.length?(i=En(t),n):i},n.padRadius=function(t){return arguments.length?(u=t==ql?ql:En(t),n):u},n.startAngle=function(t){return arguments.length?(o=En(t),n):o},n.endAngle=function(t){return arguments.length?(a=En(t),n):a},n.padAngle=function(t){return arguments.length?(l=En(t),n):l},n.centroid=function(){var n=(+e.apply(this,arguments)+ +r.apply(this,arguments))/2,t=(+o.apply(this,arguments)+ +a.apply(this,arguments))/2-Io;return[Math.cos(t)*n,Math.sin(t)*n]},n};var ql="auto";ao.svg.line=function(){return Mu(m)};var Tl=ao.map({linear:xu,"linear-closed":bu,step:_u,"step-before":wu,"step-after":Su,basis:zu,"basis-open":Lu,"basis-closed":qu,bundle:Tu,cardinal:Eu,"cardinal-open":ku,"cardinal-closed":Nu,monotone:Fu});Tl.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var Rl=[0,2/3,1/3,0],Dl=[0,1/3,2/3,0],Pl=[0,1/6,2/3,1/6];ao.svg.line.radial=function(){var n=Mu(Hu);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},wu.reverse=Su,Su.reverse=wu,ao.svg.area=function(){return Ou(m)},ao.svg.area.radial=function(){var n=Ou(Hu);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},ao.svg.chord=function(){function n(n,a){var l=t(this,u,n,a),c=t(this,o,n,a);return"M"+l.p0+r(l.r,l.p1,l.a1-l.a0)+(e(l,c)?i(l.r,l.p1,l.r,l.p0):i(l.r,l.p1,c.r,c.p0)+r(c.r,c.p1,c.a1-c.a0)+i(c.r,c.p1,l.r,l.p0))+"Z"}function t(n,t,e,r){var i=t.call(n,e,r),u=a.call(n,i,r),o=l.call(n,i,r)-Io,f=c.call(n,i,r)-Io;return{r:u,a0:o,a1:f,p0:[u*Math.cos(o),u*Math.sin(o)],p1:[u*Math.cos(f),u*Math.sin(f)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>Fo)+",1 "+t}function i(n,t,e,r){return"Q 0,0 "+r}var u=Me,o=xe,a=Iu,l=gu,c=vu;return n.radius=function(t){return 
arguments.length?(a=En(t),n):a},n.source=function(t){return arguments.length?(u=En(t),n):u},n.target=function(t){return arguments.length?(o=En(t),n):o},n.startAngle=function(t){return arguments.length?(l=En(t),n):l},n.endAngle=function(t){return arguments.length?(c=En(t),n):c},n},ao.svg.diagonal=function(){function n(n,i){var u=t.call(this,n,i),o=e.call(this,n,i),a=(u.y+o.y)/2,l=[u,{x:u.x,y:a},{x:o.x,y:a},o];return l=l.map(r),"M"+l[0]+"C"+l[1]+" "+l[2]+" "+l[3]}var t=Me,e=xe,r=Yu;return n.source=function(e){return arguments.length?(t=En(e),n):t},n.target=function(t){return arguments.length?(e=En(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},ao.svg.diagonal.radial=function(){var n=ao.svg.diagonal(),t=Yu,e=n.projection;return n.projection=function(n){return arguments.length?e(Zu(t=n)):t},n},ao.svg.symbol=function(){function n(n,r){return(Ul.get(t.call(this,n,r))||$u)(e.call(this,n,r))}var t=Xu,e=Vu;return n.type=function(e){return arguments.length?(t=En(e),n):t},n.size=function(t){return arguments.length?(e=En(t),n):e},n};var Ul=ao.map({circle:$u,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*Fl)),e=t*Fl;return"M0,"+-t+"L"+e+",0 0,"+t+" "+-e+",0Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/jl),e=t*jl/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/jl),e=t*jl/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});ao.svg.symbolTypes=Ul.keys();var jl=Math.sqrt(3),Fl=Math.tan(30*Yo);Co.transition=function(n){for(var t,e,r=Hl||++Zl,i=Ku(n),u=[],o=Ol||{time:Date.now(),ease:Nr,delay:0,duration:250},a=-1,l=this.length;++au;u++){i.push(t=[]);for(var e=this[u],a=0,l=e.length;l>a;a++)(r=e[a])&&n.call(r,r.__data__,a,u)&&t.push(r)}return 
Wu(i,this.namespace,this.id)},Yl.tween=function(n,t){var e=this.id,r=this.namespace;return arguments.length<2?this.node()[r][e].tween.get(n):Y(this,null==t?function(t){t[r][e].tween.remove(n)}:function(i){i[r][e].tween.set(n,t)})},Yl.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function i(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function u(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t in n)this.attr(t,n[t]);return this}var o="transform"==n?$r:Mr,a=ao.ns.qualify(n);return Ju(this,"attr."+n,t,a.local?u:i)},Yl.attrTween=function(n,t){function e(n,e){var r=t.call(this,n,e,this.getAttribute(i));return r&&function(n){this.setAttribute(i,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(i.space,i.local));return r&&function(n){this.setAttributeNS(i.space,i.local,r(n))}}var i=ao.ns.qualify(n);return this.tween("attr."+n,i.local?r:e)},Yl.style=function(n,e,r){function i(){this.style.removeProperty(n)}function u(e){return null==e?i:(e+="",function(){var i,u=t(this).getComputedStyle(this,null).getPropertyValue(n);return u!==e&&(i=Mr(u,e),function(t){this.style.setProperty(n,i(t),r)})})}var o=arguments.length;if(3>o){if("string"!=typeof n){2>o&&(e="");for(r in n)this.style(r,n[r],e);return this}r=""}return Ju(this,"style."+n,e,u)},Yl.styleTween=function(n,e,r){function i(i,u){var o=e.call(this,i,u,t(this).getComputedStyle(this,null).getPropertyValue(n));return o&&function(t){this.style.setProperty(n,o(t),r)}}return arguments.length<3&&(r=""),this.tween("style."+n,i)},Yl.text=function(n){return Ju(this,"text",n,Gu)},Yl.remove=function(){var n=this.namespace;return this.each("end.transition",function(){var 
t;this[n].count<2&&(t=this.parentNode)&&t.removeChild(this)})},Yl.ease=function(n){var t=this.id,e=this.namespace;return arguments.length<1?this.node()[e][t].ease:("function"!=typeof n&&(n=ao.ease.apply(ao,arguments)),Y(this,function(r){r[e][t].ease=n}))},Yl.delay=function(n){var t=this.id,e=this.namespace;return arguments.length<1?this.node()[e][t].delay:Y(this,"function"==typeof n?function(r,i,u){r[e][t].delay=+n.call(r,r.__data__,i,u)}:(n=+n,function(r){r[e][t].delay=n}))},Yl.duration=function(n){var t=this.id,e=this.namespace;return arguments.length<1?this.node()[e][t].duration:Y(this,"function"==typeof n?function(r,i,u){r[e][t].duration=Math.max(1,n.call(r,r.__data__,i,u))}:(n=Math.max(1,n),function(r){r[e][t].duration=n}))},Yl.each=function(n,t){var e=this.id,r=this.namespace;if(arguments.length<2){var i=Ol,u=Hl;try{Hl=e,Y(this,function(t,i,u){Ol=t[r][e],n.call(t,t.__data__,i,u)})}finally{Ol=i,Hl=u}}else Y(this,function(i){var u=i[r][e];(u.event||(u.event=ao.dispatch("start","end","interrupt"))).on(n,t)});return this},Yl.transition=function(){for(var n,t,e,r,i=this.id,u=++Zl,o=this.namespace,a=[],l=0,c=this.length;c>l;l++){a.push(n=[]);for(var t=this[l],f=0,s=t.length;s>f;f++)(e=t[f])&&(r=e[o][i],Qu(e,f,o,u,{time:r.time,ease:r.ease,delay:r.delay+r.duration,duration:r.duration})),n.push(e)}return Wu(a,o,u)},ao.svg.axis=function(){function n(n){n.each(function(){var n,c=ao.select(this),f=this.__chart__||e,s=this.__chart__=e.copy(),h=null==l?s.ticks?s.ticks.apply(s,a):s.domain():l,p=null==t?s.tickFormat?s.tickFormat.apply(s,a):m:t,g=c.selectAll(".tick").data(h,s),v=g.enter().insert("g",".domain").attr("class","tick").style("opacity",Uo),d=ao.transition(g.exit()).style("opacity",Uo).remove(),y=ao.transition(g.order()).style("opacity",1),M=Math.max(i,0)+o,x=Zi(s),b=c.selectAll(".domain").data([0]),_=(b.enter().append("path").attr("class","domain"),ao.transition(b));v.append("line"),v.append("text");var 
w,S,k,N,E=v.select("line"),A=y.select("line"),C=g.select("text").text(p),z=v.select("text"),L=y.select("text"),q="top"===r||"left"===r?-1:1;if("bottom"===r||"top"===r?(n=no,w="x",k="y",S="x2",N="y2",C.attr("dy",0>q?"0em":".71em").style("text-anchor","middle"),_.attr("d","M"+x[0]+","+q*u+"V0H"+x[1]+"V"+q*u)):(n=to,w="y",k="x",S="y2",N="x2",C.attr("dy",".32em").style("text-anchor",0>q?"end":"start"),_.attr("d","M"+q*u+","+x[0]+"H0V"+x[1]+"H"+q*u)),E.attr(N,q*i),z.attr(k,q*M),A.attr(S,0).attr(N,q*i),L.attr(w,0).attr(k,q*M),s.rangeBand){var T=s,R=T.rangeBand()/2;f=s=function(n){return T(n)+R}}else f.rangeBand?f=s:d.call(n,s,f);v.call(n,f,s),y.call(n,s,s)})}var t,e=ao.scale.linear(),r=Vl,i=6,u=6,o=3,a=[10],l=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in Xl?t+"":Vl,n):r},n.ticks=function(){return arguments.length?(a=co(arguments),n):a},n.tickValues=function(t){return arguments.length?(l=t,n):l},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(i=+t,u=+arguments[e-1],n):i},n.innerTickSize=function(t){return arguments.length?(i=+t,n):i},n.outerTickSize=function(t){return arguments.length?(u=+t,n):u},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var Vl="bottom",Xl={top:1,right:1,bottom:1,left:1};ao.svg.brush=function(){function n(t){t.each(function(){var t=ao.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=t.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),t.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=t.selectAll(".resize").data(v,m);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize 
"+n}).style("cursor",function(n){return $l[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var l,s=ao.transition(t),h=ao.transition(o);c&&(l=Zi(c),h.attr("x",l[0]).attr("width",l[1]-l[0]),r(s)),f&&(l=Zi(f),h.attr("y",l[0]).attr("height",l[1]-l[0]),i(s)),e(s)})}function e(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+s[+/e$/.test(n)]+","+h[+/^s/.test(n)]+")"})}function r(n){n.select(".extent").attr("x",s[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",s[1]-s[0])}function i(n){n.select(".extent").attr("y",h[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",h[1]-h[0])}function u(){function u(){32==ao.event.keyCode&&(C||(M=null,L[0]-=s[1],L[1]-=h[1],C=2),S())}function v(){32==ao.event.keyCode&&2==C&&(L[0]+=s[1],L[1]+=h[1],C=0,S())}function d(){var n=ao.mouse(b),t=!1;x&&(n[0]+=x[0],n[1]+=x[1]),C||(ao.event.altKey?(M||(M=[(s[0]+s[1])/2,(h[0]+h[1])/2]),L[0]=s[+(n[0]f?(i=r,r=f):i=f),v[0]!=r||v[1]!=i?(e?a=null:o=null,v[0]=r,v[1]=i,!0):void 0}function m(){d(),k.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),ao.select("body").style("cursor",null),q.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),z(),w({type:"brushend"})}var M,x,b=this,_=ao.select(ao.event.target),w=l.of(b,arguments),k=ao.select(b),N=_.datum(),E=!/^(n|s)$/.test(N)&&c,A=!/^(e|w)$/.test(N)&&f,C=_.classed("extent"),z=W(b),L=ao.mouse(b),q=ao.select(t(b)).on("keydown.brush",u).on("keyup.brush",v);if(ao.event.changedTouches?q.on("touchmove.brush",d).on("touchend.brush",m):q.on("mousemove.brush",d).on("mouseup.brush",m),k.interrupt().selectAll("*").interrupt(),C)L[0]=s[0]-L[0],L[1]=h[0]-L[1];else if(N){var 
T=+/w$/.test(N),R=+/^n/.test(N);x=[s[1-T]-L[0],h[1-R]-L[1]],L[0]=s[T],L[1]=h[R]}else ao.event.altKey&&(M=L.slice());k.style("pointer-events","none").selectAll(".resize").style("display",null),ao.select("body").style("cursor",_.style("cursor")),w({type:"brushstart"}),d()}var o,a,l=N(n,"brushstart","brush","brushend"),c=null,f=null,s=[0,0],h=[0,0],p=!0,g=!0,v=Bl[0];return n.event=function(n){n.each(function(){var n=l.of(this,arguments),t={x:s,y:h,i:o,j:a},e=this.__chart__||t;this.__chart__=t,Hl?ao.select(this).transition().each("start.brush",function(){o=e.i,a=e.j,s=e.x,h=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=xr(s,t.x),r=xr(h,t.y);return o=a=null,function(i){s=t.x=e(i),h=t.y=r(i),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){o=t.i,a=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,v=Bl[!c<<1|!f],n):c},n.y=function(t){return arguments.length?(f=t,v=Bl[!c<<1|!f],n):f},n.clamp=function(t){return arguments.length?(c&&f?(p=!!t[0],g=!!t[1]):c?p=!!t:f&&(g=!!t),n):c&&f?[p,g]:c?p:f?g:null},n.extent=function(t){var e,r,i,u,l;return arguments.length?(c&&(e=t[0],r=t[1],f&&(e=e[0],r=r[0]),o=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(l=e,e=r,r=l),e==s[0]&&r==s[1]||(s=[e,r])),f&&(i=t[0],u=t[1],c&&(i=i[1],u=u[1]),a=[i,u],f.invert&&(i=f(i),u=f(u)),i>u&&(l=i,i=u,u=l),i==h[0]&&u==h[1]||(h=[i,u])),n):(c&&(o?(e=o[0],r=o[1]):(e=s[0],r=s[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(l=e,e=r,r=l))),f&&(a?(i=a[0],u=a[1]):(i=h[0],u=h[1],f.invert&&(i=f.invert(i),u=f.invert(u)),i>u&&(l=i,i=u,u=l))),c&&f?[[e,i],[r,u]]:c?[e,r]:f&&[i,u])},n.clear=function(){return n.empty()||(s=[0,0],h=[0,0],o=a=null),n},n.empty=function(){return!!c&&s[0]==s[1]||!!f&&h[0]==h[1]},ao.rebind(n,l,"on")};var 
$l={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Bl=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Wl=ga.format=xa.timeFormat,Jl=Wl.utc,Gl=Jl("%Y-%m-%dT%H:%M:%S.%LZ");Wl.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?eo:Gl,eo.parse=function(n){var t=new Date(n);return isNaN(t)?null:t},eo.toString=Gl.toString,ga.second=On(function(n){return new va(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),ga.seconds=ga.second.range,ga.seconds.utc=ga.second.utc.range,ga.minute=On(function(n){return new va(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),ga.minutes=ga.minute.range,ga.minutes.utc=ga.minute.utc.range,ga.hour=On(function(n){var t=n.getTimezoneOffset()/60;return new va(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),ga.hours=ga.hour.range,ga.hours.utc=ga.hour.utc.range,ga.month=On(function(n){return n=ga.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),ga.months=ga.month.range,ga.months.utc=ga.month.utc.range;var Kl=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Ql=[[ga.second,1],[ga.second,5],[ga.second,15],[ga.second,30],[ga.minute,1],[ga.minute,5],[ga.minute,15],[ga.minute,30],[ga.hour,1],[ga.hour,3],[ga.hour,6],[ga.hour,12],[ga.day,1],[ga.day,2],[ga.week,1],[ga.month,1],[ga.month,3],[ga.year,1]],nc=Wl.multi([[".%L",function(n){return n.getMilliseconds()}],[":%S",function(n){return n.getSeconds()}],["%I:%M",function(n){return n.getMinutes()}],["%I %p",function(n){return n.getHours()}],["%a %d",function(n){return n.getDay()&&1!=n.getDate()}],["%b %d",function(n){return 1!=n.getDate()}],["%B",function(n){return 
n.getMonth()}],["%Y",zt]]),tc={range:function(n,t,e){return ao.range(Math.ceil(n/e)*e,+t,e).map(io)},floor:m,ceil:m};Ql.year=ga.year,ga.scale=function(){return ro(ao.scale.linear(),Ql,nc)};var ec=Ql.map(function(n){return[n[0].utc,n[1]]}),rc=Jl.multi([[".%L",function(n){return n.getUTCMilliseconds()}],[":%S",function(n){return n.getUTCSeconds()}],["%I:%M",function(n){return n.getUTCMinutes()}],["%I %p",function(n){return n.getUTCHours()}],["%a %d",function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],["%b %d",function(n){return 1!=n.getUTCDate()}],["%B",function(n){return n.getUTCMonth()}],["%Y",zt]]);ec.year=ga.year.utc,ga.scale.utc=function(){return ro(ao.scale.linear(),ec,rc)},ao.text=An(function(n){return n.responseText}),ao.json=function(n,t){return Cn(n,"application/json",uo,t)},ao.html=function(n,t){return Cn(n,"text/html",oo,t)},ao.xml=An(function(n){return n.responseXML}),"function"==typeof define&&define.amd?(this.d3=ao,define(ao)):"object"==typeof module&&module.exports?module.exports=ao:this.d3=ao}(); ================================================ FILE: html/assets/js/lib/jc.js ================================================ !function(a){var b;if("function"==typeof define&&define.amd&&(define(a),b=!0),"object"==typeof exports&&(module.exports=a(),b=!0),!b){var c=window.Cookies,d=window.Cookies=a();d.noConflict=function(){return window.Cookies=c,d}}}(function(){function a(){for(var a=0,b={};a{e.addEventListener("click",t=>{let n=t.target.parentElement.parentElement.parentElement.children[0].children;n[0].children[0].innerHTML=t.target.innerHTML,n[0].children[0].value=e.dataset.value,n[1].children[0].innerHTML=t.target.innerHTML;let r=n[0].dataset.onchange;void 
0!==r&&window[r](t.target.innerHTML,e.dataset.value),t.target.parentElement.parentElement.classList.remove("is-active"),t.target.parentElement.parentElement.parentElement.classList.remove("is-open")})}),document.querySelectorAll(".choices__inner").forEach(e=>{e.addEventListener("click",()=>{e.parentElement.children[1].classList.toggle("is-active"),e.parentElement.classList.toggle("is-open")})}),document.querySelectorAll(".choices").forEach(e=>{e.addEventListener("blur",()=>{e.children[1].classList.remove("is-active"),e.classList.remove("is-open")})}); ================================================ FILE: html/assets/js/locales/collection.js ================================================ const locales = { "en-US": { "LANG_JAP": "Japanese", "LANG_GER": "German", "LANG_ENG": "English", "LANG_RUS": "Russian", "LANG_SPA": "Spanish", "LANG_SWE": "Swedish", "LANG_FRE": "French", "LANG_DUT": "Dutch", "LANG_HUN": "Hungarian", "LANG_SLV": "Slovenian", "SETTINGS_COOKIE_ACCEPT": "Thanks for helping to improve Jotoba!", "SETTINGS_COOKIE_REJECT": "We will no longer collect any data.", "UPLOAD_NO_INPUT": "You need to enter a URL or upload a file!", "RADICAL_API_UNREACHABLE": "Could not reach Radical API.", "SPEECH_LISTEN_YES": "Yes", "SPEECH_LISTEN_NO": "No", "SPEECH_NO_PERMISSION": "Need permissions to perform speech recognition!", "SPEECH_ABORT": "Speech recognition aborted.", "SPEECH_NO_VOICE": "No voice input received!", "SPEECH_NOT_SUPPORTED": "Your browser does not support speech recognition!", "QOL_FURI_COPIED": "furigana copied to clipboard.", "QOL_FURI_COPIED_ALL": "full furigana copied to clipboard", "QOL_KANJI_COPIED": "kanji copied to clipboard.", "QOL_KANA_COPIED": "kana copied to clipboard.", "QOL_SENTENCE_COPIED": "copied to clipboard.", "QOL_AUDIO_COPIED": "Audio URL copied to clipboard", "QOL_LINK_COPIED": "Link URL copied to clipboard", }, "de-DE": { "LANG_JAP": "Japanisch", "LANG_GER": "Deutsch", "LANG_ENG": "Englisch", "LANG_RUS": "Russisch", "LANG_SPA": 
"Spanisch", "LANG_SWE": "Schwedisch", "LANG_FRE": "Französisch", "LANG_DUT": "Niederländisch", "LANG_HUN": "Ungarisch", "LANG_SLV": "Slowenisch", "SETTINGS_COOKIE_ACCEPT": "Vielen Dank für die Unterstützung!", "SETTINGS_COOKIE_REJECT": "Es werden keine Daten mehr gesammelt!", "UPLOAD_NO_INPUT": "Du musst entweder eine Datei hochladen oder eine URL einfügen, welche auf ein Bild zeigt!", "RADICAL_API_UNREACHABLE": "Konnte die Radikal-API nicht erreichen.", "SPEECH_LISTEN_YES": "Ja", "SPEECH_LISTEN_NO": "Nein", "SPEECH_NO_PERMISSION": "Jotoba benötigt Berechtigungen für die Spracherkennung!", "SPEECH_ABORT": "Spracherkennung abgebrochen.", "SPEECH_NO_VOICE": "Wir konnten Deine Stimme nicht hören!", "SPEECH_NOT_SUPPORTED": "Dein Browser unterstützt dieses Feature leider nicht!", "QOL_FURI_COPIED": "Furigana in Zwischenablage kopiert", "QOL_FURI_COPIED_ALL": "Vollständiges Furigana in Zwischenablage kopiert", "QOL_KANJI_COPIED": "Kanji in Zwischenablage kopiert.", "QOL_KANA_COPIED": "Kana in Zwischenablage kopiert", "QOL_SENTENCE_COPIED": "Text in Zwischenablage kopiert", "QOL_AUDIO_COPIED": "Audio URL in Zwischenablage kopiert", "QOL_LINK_COPIED": "Link URL in Zwischenablage kopiert", }, "hu": { "LANG_JAP": "Japán", "LANG_GER": "Német", "LANG_ENG": "Angol", "LANG_RUS": "Orosz", "LANG_SPA": "Spanyol", "LANG_SWE": "Svéd", "LANG_FRE": "Francia", "LANG_DUT": "Holland", "LANG_HUN": "Magyar", "LANG_SLV": "Szlovák", "SPEECH_LISTEN_YES": "Igen", "SPEECH_LISTEN_NO": "Nem", }, }; // Returns the text with the given identifier from the currently selected language function getText(identifier) { let lang = Cookies.get("page_lang") || "en-US"; return locales[lang][identifier] || locales["en-US"][identifier] || identifier; } ================================================ FILE: html/assets/js/mobile.js ================================================ /** * This JS-File contains some Improvements specifically for mobile views */ // Mark the currently selected search type (only used 
for mobile so far) markCurrentSearchType(); // On Start, check if mobile view is enabled. If yes, activate the btn Util.awaitDocumentReady(prepareMobilePageBtn); // Variables used in mobiles' easy-use btn var jmpBtn; var kanjiDiv; var jmpBtnPointsTop; // Marks the current search's type, so it can be displayed in another color function markCurrentSearchType() { let searchType = $('#search-type').val(); for (let i = 0; i < 4; i ++) { if (i == searchType) { $('.choices__item[data-value="'+i+'"]').addClass('selected'); } else { $('.choices__item[data-value="'+i+'"]').removeClass('selected'); } } } // Prepares the easy-use Btn for mobile devices function prepareMobilePageBtn() { // The Jmp Btn and Kanji elements jmpBtn = $("#jmp-btn"); kanjiDiv = document.getElementById("secondaryInfo"); // Variables used in the following two functions jmpBtnPointsTop = false; if (kanjiDiv !== null) { // Prepare the Kanji jmp and its button var kanjiPos = kanjiDiv.offsetTop; jmpBtn.removeClass("hidden"); // Window Scroll checks window.onscroll = function() { if (Util.getBrowserWidth() < 600 && (document.body.scrollTop > kanjiPos - 500 || document.documentElement.scrollTop > kanjiPos - 500)) { jmpBtn.css("transform", "rotate(0deg)"); jmpBtnPointsTop = true; } else { jmpBtn.css("transform", "rotate(180deg)"); jmpBtnPointsTop = false; } } } } // Jumps to the top or kanji part function jumpToTop() { if (jmpBtnPointsTop) { (!window.requestAnimationFrame) ? window.scrollTo(0, 0) : Util.scrollTo(0, 400); } else { let topOffset = kanjiDiv.offsetTop; (!window.requestAnimationFrame) ? window.scrollTo(0, topOffset) : Util.scrollTo(topOffset, 400); } } // Toggles the options for different input and page jumping on / off function toggleMobileNav() { $('.mobile-nav').toggleClass('hidden'); } ================================================ FILE: html/assets/js/page/infoPage.js ================================================ // On load, check if Shortcuts should be shown. 
They are useless for mobile devices if( /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) ) { document.getElementById("shortcutInfo").classList.add("hidden"); document.getElementsByClassName("help-cat")[0].classList.remove("help-cat"); } ================================================ FILE: html/assets/js/page/kanjiPage.js ================================================ /** * This JS-File implements the Kanji Animation and compound dropdown features */ // Kanji settings var kanjiSettings = []; const Animation = { none: 0, forward: 1, backwards: 2 }; // Default kanji speed (only used on init) let speed = localStorage.getItem("kanji_speed") || 1; // Initially set the speed tags according to the settings Util.awaitDocumentReady(() => { $(".speed-tag").each((i, e) => { e.children[1].innerHTML = (Math.round(Settings.display.kanjiAnimationSpeed.val * 100) + "%"); e.nextElementSibling.value = Settings.display.kanjiAnimationSpeed.val; }); }); // Initially prepare svg-settings $(".anim-container").each((i, e) => { // The Kanji let kanjiLiteral = e.id.split("_")[0]; // Figure out how many paths there are let paths = getPaths(kanjiLiteral); // Specific settings kanjiSettings[kanjiLiteral] = { strokeCount: paths.length, speed: speed, timestamp: 0, index: 0, showNumbers: false, animationDirection: Animation.none, isAutomated: false, } // Needs the settings to be loaded first Util.awaitDocumentReady(() => { kanjiSettings[kanjiLiteral].index = Settings.display.showKanjiOnLoad.val ? 
paths.length : 0; kanjiSettings[kanjiLiteral].showNumbers = Settings.display.showKanjiNumbers.val; // If the user wants to hide Kanji on load if (!Settings.display.showKanjiOnLoad.val) { $("#" + kanjiLiteral + "_svg > svg path:not(.bg)").each((i, e) => { e.classList.add("hidden"); e.style.strokeDashoffset = e.getTotalLength(); }); } // If user wants to hide numbers: hide them if (!Settings.display.showKanjiNumbers.val) { $(e).find("text").addClass("hidden"); } }); }); // Adjust svg's draw speed using the slider $('.speedSlider:not(.settings)').on('input', function () { let kanjiLiteral = this.dataset.kanji; kanjiSettings[kanjiLiteral].speed = this.value; let ident = kanjiLiteral + "_speed"; let speed = Math.round((parseFloat(this.value) * 100)); $("#" + ident).html(speed + "%"); sessionStorage.setItem(ident, speed); let playBtnState = document.getElementById(kanjiLiteral + "_play").dataset.state; if (kanjiSettings[kanjiLiteral].animationDirection !== Animation.none && playBtnState === "pause") { refreshAnimations(kanjiLiteral); } }); // Returns the paths related to the kanji function getPaths(kanjiLiteral) { let svg = document.getElementById(kanjiLiteral + "_svg").firstElementChild; return svg.querySelectorAll("path:not(.bg)"); } // Refresh the currently running animation. 
Used for changing the current animation speed async function refreshAnimations(kanjiLiteral) { let paths = getPaths(kanjiLiteral); let startTime = prepareAutoplay(kanjiLiteral); // Iterate all strokes that are potentially animating for (let i = 0; i < paths.length; i++) { let len = paths[i].getTotalLength(); let currentLen = $(paths[i]).css("stroke-dashoffset"); // Stroke is currently animating if (len !== currentLen && currentLen !== "0px") { // Reset current animation $(paths[i]).css("stroke-dashoffset", $(paths[i]).css("stroke-dashoffset")); // Animate and wait if the animations was automated let animationPromise = doAnimationStep(kanjiLiteral, paths[i], kanjiSettings[kanjiLiteral].animationDirection === Animation.forward, false); if (kanjiSettings[kanjiLiteral].isAutomated) { kanjiSettings[kanjiLiteral].index = i + 1; await animationPromise; if (startTime < kanjiSettings[kanjiLiteral].timestamp) { return; } } toggleNumbers(kanjiLiteral); } } // Conclude potential autoplay if (kanjiSettings[kanjiLiteral].isAutomated) { concludeAutoplay(kanjiLiteral); } } // Prepares the required steps to start auto-playing an animation function prepareAutoplay(kanjiLiteral) { let startTime = Date.now(); kanjiSettings[kanjiLiteral].timestamp = startTime; kanjiSettings[kanjiLiteral].isAutomated = true; let playBtn = document.getElementById(kanjiLiteral + "_play"); playBtn.dataset.state = "pause"; playBtn.children[0].classList.add("hidden"); playBtn.children[1].classList.remove("hidden"); return startTime; } // Prepares the last steps to end auto-playing an animation function concludeAutoplay(kanjiLiteral) { let playBtn = document.getElementById(kanjiLiteral + "_play"); kanjiSettings[kanjiLiteral].isAutomated = false; playBtn.dataset.state = "play"; playBtn.children[0].classList.remove("hidden"); playBtn.children[1].classList.add("hidden"); } // Based on the current state, show or pause the animation async function doOrPauseAnimation(kanjiLiteral) { let playBtn = 
document.getElementById(kanjiLiteral + "_play"); if (playBtn.dataset.state === "play") { if (kanjiSettings[kanjiLiteral].index == kanjiSettings[kanjiLiteral].strokeCount) { await undoAnimation(kanjiLiteral, true); } doAnimation(kanjiLiteral); return; } pauseAnimation(kanjiLiteral); } // Automatically draws the whole image async function doAnimation(kanjiLiteral) { let startTime = prepareAutoplay(kanjiLiteral); let paths = getPaths(kanjiLiteral); for (let index = kanjiSettings[kanjiLiteral].index; index < paths.length; index++) { if (startTime < kanjiSettings[kanjiLiteral].timestamp) { return; } kanjiSettings[kanjiLiteral].index++; kanjiSettings[kanjiLiteral].animationDirection = Animation.forward; await doAnimationStep(kanjiLiteral, paths[index], true); if (startTime < kanjiSettings[kanjiLiteral].timestamp) { return; } toggleNumbers(kanjiLiteral); kanjiSettings[kanjiLiteral].animationDirection = Animation.none; } concludeAutoplay(kanjiLiteral); } // Automatically removes the whole image async function undoAnimation(kanjiLiteral, awaitLast) { let startTime = prepareAutoplay(kanjiLiteral); let paths = getPaths(kanjiLiteral); for (kanjiSettings[kanjiLiteral].index > -1; kanjiSettings[kanjiLiteral].index--;) { if (startTime < kanjiSettings[kanjiLiteral].timestamp) { return; } kanjiSettings[kanjiLiteral].animationDirection = Animation.backwards; let awaitAnimationStep = awaitLast && kanjiSettings[kanjiLiteral].index === 0; await doAnimationStep(kanjiLiteral, paths[kanjiSettings[kanjiLiteral].index], false, !awaitAnimationStep); if (startTime < kanjiSettings[kanjiLiteral].timestamp) { return; } toggleNumbers(kanjiLiteral); kanjiSettings[kanjiLiteral].animationDirection = Animation.none; } kanjiSettings[kanjiLiteral].index = 0; concludeAutoplay(kanjiLiteral); } // Pauses the animation midway async function pauseAnimation(kanjiLiteral) { kanjiSettings[kanjiLiteral].timestamp = Date.now(); let playBtn = document.getElementById(kanjiLiteral + "_play"); playBtn.dataset.state 
= "play";
    playBtn.children[0].classList.remove("hidden");
    playBtn.children[1].classList.add("hidden");
}

// Draws (forward) or removes (backward) the given SVG path by animating its
// stroke-dashoffset. Returns a promise that resolves once the draw time has
// passed. With [fastReset] set, the transition runs at a fixed 0.5 multiplier
// and the promise resolves immediately instead of waiting.
async function doAnimationStep(kanjiLiteral, path, forward, fastReset) {
    path.classList.remove("hidden");
    let len = path.getTotalLength();
    // Draw time scales with the path length and the user-chosen speed setting
    let drawTime = len * 10 * (!fastReset ? (1 / kanjiSettings[kanjiLiteral].speed) : 0.5);
    // Backwards steps also animate the stroke color (var(--danger)) over the same time
    let transition = "transition: stroke-dashoffset " + drawTime + "ms ease 0s, stroke " + (forward ? 0 : drawTime) + "ms ease 0s;";
    let dashArray = "stroke-dasharray: " + len + "," + len + ";";
    let strokeDashoffset = "stroke-dashoffset: " + (forward ? "0;" : (len + ";"));
    path.style = transition + dashArray + strokeDashoffset + (forward ? "" : "stroke: var(--danger);");
    return new Promise(resolve => setTimeout(resolve, !fastReset ? drawTime : 0));
}

// Draws or removes the given path based on the button clicked ([direction] is +1 / -1)
async function doAnimationStep_onClick(kanjiLiteral, direction) {
    let startTime = Date.now();
    kanjiSettings[kanjiLiteral].timestamp = startTime;
    concludeAutoplay(kanjiLiteral);
    // Ignore clicks that would move the stroke index out of bounds
    if (kanjiSettings[kanjiLiteral].index + direction == -1 || kanjiSettings[kanjiLiteral].index + direction > kanjiSettings[kanjiLiteral].strokeCount) {
        return;
    }
    // Forward draws the path at the current index, backward removes the previous one
    let path = getPaths(kanjiLiteral)[direction > 0 ? kanjiSettings[kanjiLiteral].index : kanjiSettings[kanjiLiteral].index - 1];
    kanjiSettings[kanjiLiteral].index += direction;
    kanjiSettings[kanjiLiteral].animationDirection = direction > 0 ?
Animation.forward : Animation.backwards;
    await doAnimationStep(kanjiLiteral, path, direction > 0);
    // Only clear the direction if no newer animation has taken over meanwhile
    if (startTime <= kanjiSettings[kanjiLiteral].timestamp) {
        kanjiSettings[kanjiLiteral].animationDirection = Animation.none;
    }
    toggleNumbers(kanjiLiteral);
}

// Sets the SVG numbers visible / invisible or updates them if the param was not provided
function toggleNumbers(kanjiLiteral, visible) {
    let svg = document.getElementById(kanjiLiteral + "_svg").firstElementChild;
    let texts = svg.querySelectorAll("text");
    // NOTE(review): the showNumbers flag is only overwritten while the global
    // showKanjiNumbers setting is OFF — this negation looks suspicious; confirm intent
    if (visible !== undefined && !Settings.display.showKanjiNumbers.val) {
        kanjiSettings[kanjiLiteral].showNumbers = visible;
    }
    if (kanjiSettings[kanjiLiteral].showNumbers) {
        // Show only the numbers of strokes that were already drawn
        for (let i = 0; i < texts.length; i++) {
            if (i < kanjiSettings[kanjiLiteral].index) {
                texts[i].classList.remove("hidden");
            } else {
                texts[i].classList.add("hidden");
            }
        }
    } else {
        for (let i = 0; i < texts.length; i++) {
            texts[i].classList.add("hidden");
        }
    }
}

// Toggles compounds visible / hidden
function toggleCompounds(event) {
    let compoundParent = event.target.parentElement.parentElement;
    compoundParent.children[compoundParent.children.length - 1].classList.toggle("hidden");
    event.target.parentElement.children[0].classList.toggle("closed");
}

// Toggle all compounds on keypress ("c"), ignored while a text input is focussed
$(document).on("keypress", (event) => {
    if ($('input:text').is(":focus")) return;
    if (event.key == "c") {
        $(".compounds-dropdown").toggleClass("closed");
        $(".compounds-parent").toggleClass("hidden");
    }
});

/* -- Kanji decomposition tree -- */

var pendingRequests = 0;
var lastTreeLiteral = "";

// Generates the tree diagram
async function generateTreeDiagram(kanjiLiteral) {
    var width = 1000, height = 1000, i = 0;
    lastTreeLiteral = kanjiLiteral;
    var tree = d3.layout.tree()
        .size([height, width]);
    // set visible
    document.getElementById("tree-target").innerHTML = "";
    document.getElementById("backdrop").classList.remove("hidden");
    // Add the SVG to the body
    var svg = d3.select("#tree-target").append("svg")
.classed("svg-content-responsive", true)
        .classed("svg-container", true)
        .attr("preserveAspectRatio", "xMinYMin meet")
        .attr("viewBox", "0 0 " + width + " " + height)
        .append("g");

    // Build the tree from the decomposition data of the backend
    let treeData = await API.getGraphData(kanjiLiteral);
    root = treeData.tree;

    // Compute the new tree layout
    var nodes = tree.nodes(root).reverse(),
        links = tree.links(nodes);

    // Normalize for fixed-depth
    nodes.forEach((d) => { d.y = d.depth * 100; });

    // Declare the nodes
    var node = svg.selectAll("g.node")
        .data(nodes, (d) => { return d.id || (d.id = ++i); });

    // Declare the links
    var link = svg.selectAll("path.link")
        .data(links, (d) => { return d.target.id; });

    // Enter the nodes
    var nodeEnter = node.enter().append("g")
        .attr("class", "node")
        .attr("transform", (d) => { return "translate(" + d.x + "," + d.y + ")"; });

    // Circle style, color, fill
    nodeEnter.append("circle")
        .attr("r", 25)
        .style("fill", "rgba(222,227,231,255)");

    // Text
    nodeEnter.append("text")
        .attr("y", (d) => {
            // Text offset
            // NOTE(review): both ternary branches yield 5, so the distinction
            // between inner nodes and leaves is currently without effect
            return d.children || d._children ?
5 : 5;
        })
        .attr("text-anchor", "middle")
        .text((d) => { return d.name; })
        .style("fill-opacity", 1)
        .attr("has_data", (d) => {return d.literal_available});

    // Straight lines
    link.enter().insert("line")
        .attr("class", "link")
        .attr("x1", (d) => { return d.source.x; })
        .attr("y1", (d) => { return d.source.y; })
        .attr("x2", (d) => { return d.target.x; })
        .attr("y2", (d) => { return d.target.y; });

    // Move lines in front of circle to hide the lines (only needed for straight lines)
    document.querySelectorAll("#tree-target .link").forEach(e => {
        var node = e;
        var parent = e.parentNode;
        parent.removeChild(node);
        parent.prepend(e);
    });

    // Figure out how many requests are required
    const srcUrl = "/assets/svg/glyphes/";
    document.querySelectorAll("#tree-target text").forEach((e) => {
        getSvgContent(e, srcUrl + e.innerHTML + ".svg");
        pendingRequests++;
    });

    svg = document.querySelector('#tree-target svg');

    // Calculate new Viewbox of SVG containing all children
    const { xMin, xMax, yMin, yMax } = [...svg.children].reduce((acc, el) => {
        const { x, y, width, height } = el.getBBox();
        // NOTE(review): "!acc.xMin" also triggers for a legitimate 0 coordinate — edge case to verify
        if (!acc.xMin || x < acc.xMin) acc.xMin = x;
        if (!acc.xMax || x + width > acc.xMax) acc.xMax = x + width;
        if (!acc.yMin || y < acc.yMin) acc.yMin = y;
        if (!acc.yMax || y + height > acc.yMax) acc.yMax = y + height;
        return acc;
    }, {});

    // Update viewbox
    const viewbox = `${xMin} ${yMin} ${xMax - xMin} ${yMax - yMin}`;
    svg.setAttribute('viewBox', viewbox);

    // Set toggler content if available
    if (treeData.has_big) {
        let toggler = document.getElementById("tree-toggle");
        toggler.classList.remove("hidden");
        if (Settings.search.showFullGraph.val) {
            toggler.classList.add("detailed");
        }
    }
}

// Tries to replace the given target with an SVG using the given URL
function getSvgContent(target, url) {
    $.ajax({
        type : "GET",
        url : url,
        // Called upon server reponse
        success : function(result) {
            // Check if the result is actually an SVG or rather the 404 page
            if (typeof result !== "object") {
                return;
            }
            // Add action btn to
// the circle if possible
            if (target.getAttribute("has_data") === "true") {
                target.previousElementSibling.classList.add("clickable");
                target.previousElementSibling.addEventListener("click", () => {
                    location.href = JotoTools.createUrl(target.innerHTML, 1);
                });
            }
            // Replace text element with SVG
            target.replaceWith(result.firstElementChild.firstElementChild);
        },
        // Handle unexpected request errors
        error : function(result) {
            console.log("caught error on decomposition tree:", result);
        }
    });
}

// Called upon clicking on the toggle checkbox for a decomposition graph: rerenders the graph in the toggled complexity
function onGraphToggleCheckboxClick(event) {
    if (window.plausible) {
        plausible("toggle", {props: {name: "Tree toggle"}});
    }
    Settings.alterSearch('showFullGraph', !Settings.search.showFullGraph.val);
    generateTreeDiagram(lastTreeLiteral);
    let toggler = document.getElementById("tree-toggle");
    toggler.classList.toggle("detailed");
}

================================================ FILE: html/assets/js/page/newsPage.js ================================================

// [news] is declared directly in the html
prepareNews();

// Builds the news list from the global [news] array.
// NOTE(review): the HTML template string appended below was stripped by the
// source extraction and is incomplete here — restore it from the repository.
function prepareNews() {
    let list = document.getElementById("news-list");
    for (info of news) {
        list.innerHTML += '
'; list.lastChild.firstChild.firstChild.innerHTML = info.title; list.lastChild.children[1].innerHTML = Util.toLocaleDateString(info.creation_time * 1000); list.lastChild.lastChild.innerHTML = Util.decodeHtml(info.html); } } ================================================ FILE: html/assets/js/page/overlay/notifications.js ================================================ // On Start -> Try and load the latest data requestShortData(); // Start a query to receive current notifications async function requestShortData() { if (!localStorage) { return; } var data = {"after": parseInt(localStorage.getItem("notification_timestamp") || 00000000)}; $.ajax({ type : "POST", url : "/api/news/short", data: JSON.stringify(data), headers: { 'Content-Type': 'application/json' }, success : function(result) { parseShortNotificationResults(result); }, error : function(result) { console.log(result); } }); } // Parses the results of /api/news/short API calls and displays them async function parseShortNotificationResults(results) { // If nothing was received, show a message that there are no new updates if (results.entries.length == 0) { $("#no-result").removeClass("hidden"); return; } // Else, show the results let notifiContent = document.getElementById("notification-content"); for (let result of results.entries) { let creationDateString = Util.toLocaleDateString(result.creation_time * 1000); var entryHtml = '
' + '
' + result.title + '
' + '
' + creationDateString + '
' + '
' + result.html + '
' +'
';
        // Prepend newest entries and mark the bell button as having updates
        notifiContent.innerHTML = entryHtml + notifiContent.innerHTML;
        document.getElementsByClassName("notificationBtn")[0].classList.add("update");
    }
}

// Shows the detailed information of the target element using its ID
function requestLongData(event, id) {
    // Clicks on embedded images should not open the detail view
    if (event.target.nodeName === "IMG") {
        return;
    }
    var data = {"id": id};
    $.ajax({
        type : "POST",
        url : "/api/news/detailed",
        data: JSON.stringify(data),
        headers: {
            'Content-Type': 'application/json'
        },
        success : function(result) {
            parseDetailedNotificationResults(result);
        },
        error : function(result) {
            console.log(result);
        }
    });
}

// Parses the results of /api/news/detailed API calls and displays them
async function parseDetailedNotificationResults(result) {
    $("#notification-detail-head").html(result.entry.title);
    $("#notification-detail-body").html(result.entry.html);
    $("#notificationModal").modal('show');
}

// Opens the short-informations for notifications
function toggleNotifications(event) {
    let container = $('#notifications-container');
    // Check if notification is opened already
    if (!container.hasClass("hidden")) {
        closeNotifications();
        return;
    }
    // Prevent click event to pass through to the body
    event.stopPropagation();
    // Set the timestamp (seconds) so future /api/news/short calls only fetch newer entries
    localStorage.setItem("notification_timestamp", Math.floor(Date.now() / 1000));
    container[0].classList.remove("hidden");
    // Make clicks outside the element close it
    $(document).one("click", function() {
        closeNotifications();
        container.off("click");
    });
    container.click(function(event){
        event.stopPropagation();
    });
}

// Closes the short-informations for notifications
function closeNotifications() {
    document.getElementById("notifications-container").classList.add("hidden");
    document.getElementsByClassName("notificationBtn")[0].classList.remove("update");
}

// Calls a page that displays (more-or-less) all past notifications
function showAllNotifications() {
    Util.loadUrl(JotoTools.getPageUrl("news"));
}

================================================ FILE:
html/assets/js/page/overlay/settings.js ================================================

/*
 * This JS-File everything related to the settings overlay
 */
function Settings() { }

// Analytics. Use your own or leave empty
var analyticsUrl = '';
var analyticsAttributes = null;

// Each setting entry has the shape { isCookie, id, dataType, val }:
// isCookie decides whether it is persisted as a cookie or in localStorage,
// id is the storage key, dataType drives parsing in Settings.loadSettings.

// Default "language" settings
Settings.language = {
    searchLang: {
        isCookie: true,
        id: "default_lang",
        dataType: "string",
        // Falls back to the browser language, then to en-US
        val: JotoTools.toJotobaLanguage(Cookies.get("default_lang") || navigator.language || navigator.userLanguage || "en-US")
    },
    pageLang: {
        isCookie: true,
        id: "page_lang",
        dataType: "string",
        val: Cookies.get("page_lang") || "en-US"
    },
}

// Default "search" settings
Settings.search = {
    alwaysShowEnglish: { isCookie: true, id: "show_english", dataType: "boolean", val: true },
    showEnglishOnTop: { isCookie: true, id: "show_english_on_top", dataType: "boolean", val: false },
    showExampleSentences: { isCookie: true, id: "show_sentences", dataType: "boolean", val: true },
    showFurigana: { isCookie: true, id: "sentence_furigana", dataType: "boolean", val: true },
    focusSearchbar: { isCookie: false, id: "focus_searchbar", dataType: "boolean", val: false },
    selectSearchbarContent: { isCookie: false, id: "select_searchbar_content", dataType: "boolean", val: false },
    itemsPerPage: { isCookie: true, id: "items_per_page", dataType: "int", val: 10 },
    kanjiPerPage: { isCookie: true, id: "kanji_page_size", dataType: "int", val: 4 },
    showFullGraph: { isCookie: false, id: "show_full_graph", dataType: "boolean", val: true },
}

// Default "display" settings
Settings.display = {
    theme: { isCookie: false, id: "theme", dataType: "string", val: "light" },
    kanjiAnimationSpeed: { isCookie: false, id: "kanji_speed", dataType: "float", val: 1 },
    showKanjiOnLoad: { isCookie: false, id: "show_kanji_on_load", dataType: "boolean", val: true },
    showKanjiNumbers: { isCookie: false, id: "show_kanji_numbers", dataType: "boolean", val: false },
}

// Default "other" settings
Settings.other = {
    enableDoubleClickCopy: {
isCookie: false, id: "dbl_click_copy", dataType: "boolean", val: true },
    trackingAllowed: { isCookie: false, id: "tracking_allowed", dataType: "boolean", val: true },
    firstVisit: { isCookie: false, id: "first_time", dataType: "boolean", val: true }
}

// Saves a settings-object into localStorage / Cookies
Settings.saveSettings = function (object) {
    for (let [key, entry] of Object.entries(object)) {
        if (entry.isCookie) {
            Cookies.set(entry.id, entry.val, { path: '/', expires: 365 });
        } else {
            localStorage.setItem(entry.id, entry.val);
        }
    }
}

// Loads a settings-object from localStorage / Cookies
Settings.loadSettings = function (object) {
    for (let [key, entry] of Object.entries(object)) {
        let data = "";
        // Try to get the data
        if (entry.isCookie) {
            data = Cookies.get(entry.id, entry.val);
        } else {
            data = localStorage.getItem(entry.id);
        }
        // Not found => ignore
        if (!data) {
            continue;
        }
        // Found => parse and overwrite according to the entry's declared dataType
        switch (entry.dataType) {
            case "boolean":
                object[key].val = Util.toBoolean(data);
                break;
            case "int":
                object[key].val = parseInt(data);
                break;
            case "float":
                object[key].val = parseFloat(data);
                break;
            default:
                object[key].val = data;
        }
    }
}

// Alters a "language" setting and reloads if needed
Settings.alterLanguage = function (key, value, reloadPage) {
    Settings.language[key].val = value;
    Settings.saveSettings(Settings.language);
    if (reloadPage) {
        location.reload();
    }
}

// Used for the Choices-Hook on function calls; only reloads when on a search page
alterLanguage_search = function (html, value) {
    let reloadPage = window.location.href.includes("/search");
    Settings.alterLanguage("searchLang", value, reloadPage);
}

// Used for the Choices-Hook on function calls
alterLanguage_page = function (html, value) {
    Settings.alterLanguage("pageLang", value, true);
}

// Alters a "search" setting and reloads if needed
Settings.alterSearch = function (key, value, updateSub) {
    Settings.search[key].val = value;
    Settings.saveSettings(Settings.search);
    if (updateSub) {
        OverlaySettings.updateSubEntries();
    }
} //
// Alters a "display" setting and reloads if needed
Settings.alterDisplay = function (key, value) {
    Settings.display[key].val = value;
    Settings.saveSettings(Settings.display);
}

// Alters a "other" setting and reloads if needed
Settings.alterOther = function (key, value) {
    Settings.other[key].val = value;
    Settings.saveSettings(Settings.other);
}

// Opens the Settings Overlay and accepts cookie usage
Settings.trackingAccepted = function (manuallyCalled) {
    if (manuallyCalled) Util.showMessage("success", getText("SETTINGS_COOKIE_ACCEPT"));
    Settings.alterOther("trackingAllowed", true);
    loadAnalytics();
    Util.setMdlCheckboxState("tracking_settings", true);
}

// Revokes the right to store user Cookies
Settings.trackingDeclined = function (manuallyCalled) {
    if (manuallyCalled) Util.showMessage("success", getText("SETTINGS_COOKIE_REJECT"));
    Settings.alterOther("trackingAllowed", false);
    Util.setMdlCheckboxState("tracking_settings", false);
}

// Special handling for tracking_allowed
Settings.onTrackingAcceptChange = function (allowed) {
    if (allowed) {
        Settings.trackingAccepted(true);
    } else {
        Settings.trackingDeclined(true);
    }
}

// Prepare the settings overlay's data initially
async function prepareSettingsOverlay() {
    // Prepare the Settings Overlay
    OverlaySettings.updateDropdowns();
    OverlaySettings.updateCheckboxes();
    OverlaySettings.updateSubEntries();
    OverlaySettings.updateSliders();
    OverlaySettings.updateInputs();
};

// Load Settings on initial load
Util.awaitDocumentInteractive(() => {
    Settings.loadSettings(Settings.search);
    Settings.loadSettings(Settings.display);
    Settings.loadSettings(Settings.other);
});

Util.awaitDocumentReady(() => {
    Settings.loadSettings(Settings.language);
    prepareSettingsOverlay();
    // Add the info-icon on initial page load if needed
    if (Settings.other.firstVisit.val) {
        $(".infoBtn").addClass("new");
    }
    // Load analytics if allowed -> At this points any external source with high prio has already been loaded in and should have overwritten the analytics
vars if (Settings.other.trackingAllowed.val && analyticsUrl.length > 0) { loadAnalytics(); } }); function loadAnalytics() { Util.awaitDocumentReady(() => { Util.loadScript(analyticsUrl, true, analyticsAttributes, () => { // Prepare any css-based events after the script is ready let buttons = document.querySelectorAll(".p"); for (var i = 0; i < buttons.length; i++) { buttons[i].addEventListener('click', handleEvent); } function handleEvent(event) { if (window.plausible) { let attribute = event.target.getAttribute('data-p'); if (!attribute) return; let eventData = attribute.split(/,(.+)/); let events = [JSON.parse(eventData[0]), JSON.parse(eventData[1] || '{}')]; plausible(...events); } } }); }); } ================================================ FILE: html/assets/js/page/overlay/settings_overlay.js ================================================ /** This JS file is used for the connection between the settings "backend" and "frontend" */ function OverlaySettings() {} // Toggles a single element visible / hidden var toggleSubEntry = function(id, show) { if (show) { $(id).removeClass("hidden"); } else { $(id).addClass("hidden"); } } // Sets a slider to the given value var setSliderEntry = function (sliderId, textId, value) { $(sliderId).val(Settings.display.kanjiAnimationSpeed.val); $(textId).html(Math.round(Settings.display.kanjiAnimationSpeed.val * 100) + "%"); } // Sets a specific input's value var setInput = function (id, value) { let kanjiInput = $(id); kanjiInput.val(value); if (value) { kanjiInput.parent().addClass("is-dirty"); } } // Updates all dropdowns OverlaySettings.updateDropdowns = function() { // "Language" page document.querySelectorAll("#search-lang-select > .choices__item--choice").forEach((e) => { if (e.dataset.value == Settings.language.searchLang.val) { let choicesInner = e.parentElement.parentElement.parentElement.children[0].children; choicesInner[0].children[0].innerHTML = e.innerHTML; choicesInner[1].children[0].innerHTML = e.innerHTML; } }); 
document.querySelectorAll("#page-lang-select > .choices__item--choice").forEach((e) => {
        if (e.dataset.value == Settings.language.pageLang.val) {
            let choicesInner = e.parentElement.parentElement.parentElement.children[0].children;
            choicesInner[0].children[0].innerHTML = e.innerHTML;
            choicesInner[1].children[0].innerHTML = e.innerHTML;
        }
    });
}

// Updates all checkboxes to mirror the current Settings values
OverlaySettings.updateCheckboxes = function() {
    // "Search" page
    Util.setMdlCheckboxState("show_eng_settings", Settings.search.alwaysShowEnglish.val);
    Util.setMdlCheckboxState("show_eng_on_top_settings", Settings.search.showEnglishOnTop.val);
    Util.setMdlCheckboxState("show_example_sentences_settings", Settings.search.showExampleSentences.val);
    Util.setMdlCheckboxState("show_sentence_furigana_settings", Settings.search.showFurigana.val);
    Util.setMdlCheckboxState("focus_search_bar_settings", Settings.search.focusSearchbar.val);
    Util.setMdlCheckboxState("select_searchbar_content_settings", Settings.search.selectSearchbarContent.val);
    // "Display" page
    Util.setMdlCheckboxState("use_dark_mode_settings", Settings.display.theme.val === "dark");
    Util.setMdlCheckboxState("show_kanji_on_load_settings", Settings.display.showKanjiOnLoad.val);
    Util.setMdlCheckboxState("show_kanji_numbers_settings", Settings.display.showKanjiNumbers.val);
    // "Other" page
    Util.setMdlCheckboxState("dbl_click_copy_settings", Settings.other.enableDoubleClickCopy.val);
    Util.setMdlCheckboxState("tracking_settings", Settings.other.trackingAllowed.val);
}

// Updates all Sub entries (options only visible while their parent option is active)
OverlaySettings.updateSubEntries = function() {
    // "Search" page
    toggleSubEntry("#eng_on_top_parent", Settings.search.alwaysShowEnglish.val);
    toggleSubEntry("#select_searchbar_content_parent", Settings.search.focusSearchbar.val);
}

// Updates all sliders
OverlaySettings.updateSliders = function() {
    // "Display" page
    setSliderEntry("#show_anim_speed_settings", "#show_anim_speed_settings_slider", Settings.display.kanjiAnimationSpeed.val);
}

// Updates all inputs
OverlaySettings.updateInputs = function() {
    setInput("#items_per_page_input", Settings.search.itemsPerPage.val);
    setInput("#kanji_per_page_input", Settings.search.kanjiPerPage.val);
}

================================================ FILE: html/assets/js/page/sentencePage.js ================================================

// Toggles the given translation visible / invisible
function toggleTranslation(element) {
    let parent = $(element.parentElement);
    parent.find(".sentence-translation").toggle("hidden");
    parent.find(".lang-separator").toggle("hidden");
    parent.find(".sentence-toggle").toggleClass("hidden");
}

================================================ FILE: html/assets/js/page/wordPage.js ================================================

// Object reference for sentence reader (may be null on pages without one)
const sr = document.getElementById("sr");

// Enable sentence-example expander
$(".expander").on("click", (event) => {
    event.target.classList.toggle("on");
    event.target.parentElement.children[0].classList.toggle("collapsed");
});

// On first load and on every page resize: check where the expander-triangle is needed & whether sentence reader should be centered
hideUnusedExpanders();
centerSentenceReaderIfNeeded();

var screenWidth = $(window).width();
$(window).resize(() => {
    // Mobile scrolling sends resize events because of the (dis-)appearing url input. Simple fix: ignore height changes.
if ($(window).width() == screenWidth) {
        return;
    }
    screenWidth = $(window).width();
    hideUnusedExpanders();
    centerSentenceReaderIfNeeded();
});

// If the reader is overflown, remove the center to avoid weird style errors
function centerSentenceReaderIfNeeded() {
    if (sr === undefined || sr === null) return;
    if (Util.checkOverflow(sr)) {
        sr.parentElement.classList.add("no-center");
    } else {
        sr.parentElement.classList.remove("no-center");
    }
}

// Scrolls the sentence reader onto the selected element
Util.awaitDocumentReady(scrollSentenceReaderIntoView);
function scrollSentenceReaderIntoView() {
    let selected = $(".sentence-part.selected")[0];
    if (selected !== undefined) {
        $(".search-annotation").scrollLeft(selected.offsetLeft - $(".search-annotation")[0].offsetLeft);
        $(".search-annotation").scrollTop(selected.offsetTop - $(".search-annotation")[0].offsetTop);
    }
}

// Check if the expander-triangle should be hidden (content shorter than the collapsed height)
function hideUnusedExpanders() {
    $(".expander").each((i,e) => {
        if (e.parentElement.children[0].scrollHeight < 40) {
            e.classList.add("hidden");
        } else {
            e.classList.remove("hidden");
        }
    });
}

================================================ FILE: html/assets/js/qol.js ================================================

/**
 * This JS-File contains some Quality of Life improvements for the website
 */

var shiftPressed = false;

// Prevent random dragging of elements
$('a').mousedown((event) => {
    event.preventDefault();
});

// Keep track of the shift key state
$(document).on('keyup keydown keypress', function (e) {
    shiftPressed = e.shiftKey
});

// Key Events for easy usability; ignored while a text input is focussed
$(document).on("keypress", (event) => {
    if ($('input:text').is(":focus")) return;
    switch (event.key) {
        case '/':
            // Focus search bar
            event.preventDefault();
            $('#search').focus();
            $('#search').select();
            if (window.plausible) plausible("shortcut", {props: {key: "/"}});
            break
        case 'w':
            // Change to Word Tab
            changeSearchType(null, "0");
            if (window.plausible && Util.isIndexPage()) plausible("shortcut", {props: {key: "w"}});
            break;
        case 'k':
// Change to Word Tab changeSearchType(null, "1"); if (window.plausible && !Util.isIndexPage()) plausible("shortcut", {props: {key: "k"}}); break; case 's': // Change to Sentence Tab changeSearchType(null, "2"); if (window.plausible && !Util.isIndexPage()) plausible("shortcut", {props: {key: "s"}}); break; case 'n': // Change to Names Tab changeSearchType(null, "3"); if (window.plausible && !Util.isIndexPage()) { plausible("shortcut", {props: {key: "n"}}); } break; case 'N': // Open index in new tab window.open(location.origin, "_blank"); break; case 'p': // Play first Audio on page $(".audioBtn").first().trigger("click"); if (window.plausible && !Util.isIndexPage()) plausible("shortcut", {props: {key: "p"}}); break; case "Enter": // Do a search while rad-picker is opened if (!$(".overlay.radical").hasClass("hidden")) { $(".btn-search").click(); } break; default: if (event.key > 0 && event.key < 10) { let kanji = $('.kanji-preview.large.black')[event.key - 1] if (kanji !== undefined) { kanji.click(); } } } }); // Copies Furigana to clipboard on click $('.furigana-preview').on("click", (event) => { // Check if element should not be copied if (!shouldCopyFurigana(event)) return; // Copy and show message preventDefaultHighlight(event, 100, true, false); JotoTools.copyTextAndEcho($(event.target).html().trim(), "QOL_FURI_COPIED"); }); // Copies full Furigana to clipboard on dblclick $('.furigana-preview').on("dblclick", (event) => { // Check if element should not be copied if (!shouldCopyFurigana(event)) return; // Find all furigana let parent = $(event.target.parentElement.parentElement); let furi = ""; parent.find('.furigana-preview, .inline-kana-preview').each((i, element) => { furi += element.innerHTML.trim(); }); // Copy and show the correct message preventDefaultHighlight(event, 100, false); Util.copyToClipboard(furi); $('.msg-message.msg-success.msg-visible').last().remove(); $('.msg-message.msg-success.msg-visible').last().html(getText("QOL_FURI_COPIED_ALL")); 
}); // Copies translations to clipboard on double click $('.kanji-preview').on("dblclick", (event) => { // Check if element should not be copied if (!shouldCopyKanji()) return; // Copy preventDefaultHighlight(event, 500, false); copyTranslationAndShowMessage(event.target.parentElement.parentElement); }); // Prevent double click highlight document.querySelectorAll(".furigana-kanji-container").forEach(container => { container.addEventListener('mousedown', function (event) { if (event.detail > 1) { event.preventDefault(); } }, false); }); // Copies translations to clipboard on double click $('.inline-kana-preview').on("dblclick", (event) => { // Check if element should not be copied if (!shouldCopyKanji()) return; // Copy preventDefaultHighlight(event, 500, false); copyTranslationAndShowMessage(event.target.parentElement); }); // -tag Fix for standard double click document.querySelectorAll(".furigana-kanji-container").forEach(container => { container.addEventListener("dblclick", () => { // Dont do anything if auto-copy is turned on if (shouldCopyKanji()) { return; } // Get and clear the selection let selection = window.getSelection(); selection.removeAllRanges(); // Select all non-furigana children #1 Firefox exclusive: Multiple selection ranges if (navigator.userAgent.search("Firefox") > -1) { container.childNodes.forEach((child) => { var range = document.createRange(); range.setStartBefore(child); if (child.tagName === "RUBY") { range.setEndAfter(child.children[0]); } else { range.setEndAfter(child); } selection.addRange(range); }); // Select all non-furigana children #2 } else { var range = document.createRange(); range.setStartBefore(container); let lastChild = container.lastChild; if (lastChild.tagName === "RUBY") { range.setEndAfter(lastChild.children[0]); } else { range.setEndAfter(lastChild); } selection.addRange(range); } }); }); // Check conditions for copying Furigana function shouldCopyFurigana(event) { // Prevent copying if the text was just a placeholder 
    if (event.target.innerHTML == " ") return false;

    // Prevent if furigana is part of the sentence reader
    if ($(event.target).parents().toArray().includes($("#sr")[0])) {
        return false;
    }

    // Prevent if user has disabled the double-click-copy feature in the settings
    return Settings.other.enableDoubleClickCopy.val;
}

// Check conditions for copying Kanji
// @returns {boolean} true unless the user disabled the double-click-copy feature
function shouldCopyKanji() {
    // Prevent if user has disabled the feature
    return Settings.other.enableDoubleClickCopy.val;
}

// Prevents the default User highlighting by clearing the selection and briefly
// suppressing the element's click/dblclick handlers
// @param event the triggering mouse event
// @param timeoutDurationMs how long the handlers stay suppressed
// @param disableClick / disableDoubleClick which handler kinds to suppress
function preventDefaultHighlight(event, timeoutDurationMs, disableClick, disableDoubleClick) {
    startEventTimeout(event.target, timeoutDurationMs, disableClick, disableDoubleClick);
    event.preventDefault();
    Util.deleteSelection();
}

// Disables onclick events for a short period of time by swapping the jQuery-registered
// handler for a no-op and restoring it after durationMs.
// NOTE(review): $._data is jQuery's internal event store and assumes exactly one
// registered click/dblclick handler ([0]) — verify if handlers are ever stacked.
function startEventTimeout(targetElement, durationMs, disableClick = true, disableDoubleClick = true) {
    // Disable events for single clicks
    if (disableClick) {
        let eventFunc = $._data(targetElement, "events").click[0].handler;
        $._data(targetElement, "events").click[0].handler = () => { };
        setTimeout(() => {
            $._data(targetElement, "events").click[0].handler = eventFunc;
        }, durationMs);
    }

    // Disable events for double clicks
    if (disableDoubleClick) {
        let eventFuncDbl = $._data(targetElement, "events").dblclick[0].handler;
        $._data(targetElement, "events").dblclick[0].handler = () => { };
        setTimeout(() => {
            $._data(targetElement, "events").dblclick[0].handler = eventFuncDbl;
        }, durationMs);
    }
}

// Used by kanji/kana copy to combine all parts, starts from the flex (parent).
// Concatenates every .kanji-preview / .inline-kana-preview child and tracks whether
// the result is kanji-only, kana-only, or mixed to pick the echo message.
function copyTranslationAndShowMessage(textParent) {
    let fullContent = "";
    let onlyKanji = true;
    let onlyKana = true;

    // Find all children that are of interest
    $(textParent).find('.kanji-preview, .inline-kana-preview').each((i, element) => {
        let txt = element.innerHTML.trim();
        fullContent += txt
        // NOTE(review): `char` is missing a declaration, so this creates/overwrites
        // a global (a ReferenceError in strict mode) — should be `for (const char of txt)`.
        for (char of txt) {
            let isKanji = char.match(kanjiRegEx);
            if (isKanji) {
                onlyKana = false;
            } else {
                onlyKanji = false;
            }
        }
    });

    // Copy and visual feedback
JotoTools.copyTextAndEcho(fullContent, onlyKanji ? getText("QOL_KANJI_COPIED") : (onlyKana ? getText("QOL_KANA_COPIED") : getText("QOL_SENTENCE_COPIED"))) } // Changes the search type in the upper row depending on the users input function changeSearchType(html, newType) { var search_value = $('#search').val(); if (search_value.length > 0) { Util.loadUrl(JotoTools.createUrl(search_value, newType)); } } // Hides the backdrop if clicked directly on it function onBackdropClick(event) { if (event.target.id === "backdrop") { event.target.classList.add("hidden"); } } // Focus Search Bar on load if the user wants it to (or on index page) Util.awaitDocumentReady(() => { let is_index = Util.isIndexPage(); if (Settings.search.focusSearchbar.val && !is_index) { preventNextApiCall = true; } if (Settings.search.focusSearchbar.val || is_index) { let s = $('#search'); s.focus(); Util.setCaretPosition("search", -1); if (Settings.search.selectSearchbarContent.val) { s[0].setSelectionRange(0, s[0].value.length); } } }); // Wait for the Document to load completely Util.awaitDocumentReady(() => { // Iterate all audio Btns on the page (if any) and enable their audio feature $('.audioBtn').each((e, i) => { let audioParent = $(i); audioParent.click((e) => { let audio = $(e.target).children()[0]; audio.play(); }); }); // Allow right-click on "Play audio" buttons to copy the proper asset-url $(".audioBtn").contextmenu((event) => { event.preventDefault(); var url = window.location.origin + $(event.target).attr('data'); JotoTools.copyTextAndEcho(url, "QOL_AUDIO_COPIED"); }); // Disables the dropdown's animation until the first onclick event $(".input-field.first-wrap").one("click", (event) => { $('.choices__list.choices__list--dropdown.index').addClass('animate'); }) // Install the serviceWorker for PWA if ('serviceWorker' in navigator) { navigator.serviceWorker.register('/service-worker.js', { scope: "." 
}) .catch(function (error) { console.log('Service worker registration failed, error:', error); }); } // Change URL to contain the language code if (Util.isInPath("search")) { let currentParams = new URLSearchParams(document.location.search); let txt = document.getElementById("search").value; let index = currentParams.get("i") || undefined; let type = currentParams.get("t") || $('#search-type').val(); let lang = currentParams.get("l") || Settings.language.searchLang.val; let page = currentParams.get("p") || $(".pagination-circle.active").html(); history.replaceState({}, 'Jotoba', JotoTools.createUrl(txt, type, page || 1, lang, index)); } }); ================================================ FILE: html/assets/js/search/api.js ================================================ function API() {}; // Used to store old Requests so they can be cancelled when no longer needed API.lastRequest = undefined; // Numbers > -1 mean that no API call will be made when input.length is above the value API.suggestionStop = -1; /** * Calls the API to get input suggestions * @param radicalArray {[]} containing radicals that need to be contained in searched kanji */ API.getSuggestionApiData = function(radicalArray, successFn, errorFn) { // Check if API call should be prevented if (preventNextApiCall) { preventNextApiCall = false; return; } // Prevent if a request failed and the input is >= the text it failed against if (API.suggestionStop > -1 && input.value.length > API.suggestionStop) { return; } else { API.suggestionStop = -1; } // Create the JSON let lang = Cookies.get("default_lang"); let type = JotoTools.getCurrentSearchType(); let txt = input.value; if (txt.length == 0) { return; } let inputJSON = { "input": txt, "search_type": type, "lang": lang === undefined ? 
"en-US" : lang, "radicals": radicalArray || [] } // Abort any requests sent earlier if (API.lastRequest !== undefined) { API.lastRequest.abort(); } // Send Request to backend API.lastRequest = $.ajax({ type : "POST", url : "/api/suggestion", data: JSON.stringify(inputJSON), headers: { 'Content-Type': 'application/json' }, success : function(result) { successFn(result); }, error : function(result) { if (result.statusText !== "abort") { errorFn(result); } } }); } /** * Emulates the API behaviour for suggestions; returning Hashtag values instead * @param currentText {string} a single word without spaces, representing the #-value * @param callback {function} function to call after collecting suggestions */ API.getHashtagData = function(currentText, callback) { let suggestions = []; for (let i = 0; i < hashtags.length; i++) { if (hashtags[i].toLowerCase().includes(currentText.toLowerCase())) { suggestions.push({"primary": hashtags[i]}); if (suggestions.length == 10) { break; } } } let resultJSON = { "suggestions": suggestions, "suggestion_type": "hashtag" } callback(resultJSON); } /** * Returns the kanji decomposition tree's data of the given literal * * @param {string} targetLiteral literal to search for * @returns the API result */ API.getGraphData = async function(targetLiteral) { // Generate input let inputJSON = { "literal": targetLiteral, "full": Settings.search.showFullGraph.val }; // Get the data result from the server let result = await $.ajax({ type : "POST", url : "/api/kanji/decompgraph", data: JSON.stringify(inputJSON), headers: { 'Content-Type': 'application/json' } }); return result; } ================================================ FILE: html/assets/js/search/eventHandler.js ================================================ /* * Made to Handle search related events. Loads after search.js! 
*/ // Key Events focussing on the search $(document).on("keydown", (event) => { if (!$('#search').is(":focus")) return; // Switch the key code for potential changes switch (event.key) { case "ArrowUp": // Use suggestion above current event.preventDefault(); Suggestions.overlay.changeSuggestionIndex(-1); break; case "ArrowDown": // Use suggestion beneath current case "Tab": event.preventDefault(); var direction = 1; if (event.key == "Tab" && shiftPressed) { direction = -1; } Suggestions.overlay.changeSuggestionIndex(direction); break; case "Enter": // Start the search if (currentSuggestionIndex > 0) { event.preventDefault(); Suggestions.overlay.activateSelection(); } else { $('#searchBtn').click(); } break; } }); // Adding listeners Util.awaitDocumentReady(() => { // Also show shadow text if user clicked before focus event could be caught if ($(input).is(":focus")) { Suggestions.updateSuggestions(); } // Event whenever the user types into the search bar document.getElementById("search").addEventListener("input", e => { Suggestions.updateSuggestions(); toggleSearchIcon(200); }); // Check if input was focussed / not focussed to show / hide overlay document.getElementById("search").addEventListener("focus", e => { Suggestions.updateSuggestions(); }); // Event whenever the user types into the search bar document.querySelector("#kanji-search").addEventListener("input", e => { getRadicalSearchResults(); }); // When clicking anything but the search bar or dropdown (used to hide overlays) document.addEventListener("click", e => { if (!Util.isChildOf(searchRow, e.target)) { sContainer.parentElement.classList.add("hidden"); } }); // Check on resize if shadow text would overflow the search bar and show / hide it window.addEventListener("resize", e => { setShadowText(); }); }); // Scroll sentence-reader to display selected index Util.awaitDocumentReady(() => { let sentencePart = $('.sentence-part.selected'); if (sentencePart.length > 0) { $('#sr')[0].scrollTop = 
(sentencePart.offset().top); } }); // Initialize Pagination Buttons Util.awaitDocumentReady(() => { $('.pagination-item:not(.disabled) > button').on("click", (e) => { var searchValue = JotoTools.getCurrentSearch(); var searchType = JotoTools.getCurrentSearchType(); var targetPage = $(e.target.parentNode).attr("target-page"); Util.loadUrl(JotoTools.createUrl(searchValue, searchType, targetPage)); }); }); ================================================ FILE: html/assets/js/search/overlay/imageSearch.js ================================================ /** * This file handles everything related to image-search requests */ // Quick image search for STRG + V document.onpaste = (evt) => { let dT = evt.clipboardData || window.clipboardData; let file = dT.files[0]; if (file !== undefined && file.name.includes(".png")) { disableUploadUrlInput(file.name); openImageCropOverlay(file); } }; // Shows / Hides the image search overlay function toggleImageSearchOverlay() { let overlay = $('.overlay.image'); overlay.toggleClass('hidden'); // Reset on close if (urlInputDisabled) { document.getElementById("imgUploadFile").value = null; resetUploadUrlInput(); } closeAllSubSearchbarOverlays("image"); } // Clicks on the upload SVG should trigger the underlying function function imgUploadAltClick() { document.getElementById("imgUploadFile").click(); } // Blocks the URL input upon file selection function imgSearchFileSelected() { let fileInput = document.getElementById("imgUploadFile").files[0]; if (fileInput !== undefined) { disableUploadUrlInput(fileInput.name); openImageCropOverlay(); } else { resetUploadUrlInput(); } } // Toggles the URL input active / disabled var urlInputDisabled = false; var originalMsg, cropTarget; Util.awaitDocumentReady(() => { originalMsg = document.getElementById("imgUploadUrl").placeholder; }); function resetUploadUrlInput() { let urlInput = document.getElementById("imgUploadUrl") urlInput.classList.remove("disabled"); urlInput.disabled = false; 
urlInputDisabled = false; urlInput.placeholder = originalMsg; document.getElementById("imgUploadFile").value = null; if (cropTarget !== null) { cropTarget.croppie("destroy"); } toggleCroppingModal(); } function disableUploadUrlInput(newMessage) { let urlInput = document.getElementById("imgUploadUrl") urlInput.classList.add('disabled'); urlInput.disabled = true; urlInputDisabled = true; urlInput.value = null; urlInput.placeholder = newMessage; } // Opens the Image Cropping Overlay function openImageCropOverlay(pastedFile) { var selectedFiles = document.getElementById("imgUploadFile").files; var inputUrl = document.getElementById("imgUploadUrl").value; if (selectedFiles.length > 0 || pastedFile !== undefined) { let reader = new FileReader(); reader.onload = function(e) { initCroppie(e.target.result); } reader.readAsDataURL(selectedFiles[0] || pastedFile); toggleCroppingModal(); } else if (inputUrl.length > 0) { Util.checkUrlIsImage(inputUrl, () => { initCroppie(inputUrl); }); toggleCroppingModal(); } else { Util.showMessage("error", getText("UPLOAD_NO_INPUT")); } } // Receives the image from Croppie, sends it to the server and starts the search function uploadCroppedImage(dataUrl) { cropTarget.croppie('result', { type: 'canvas', size: 'viewport' }).then(function (resp) { // Generate a file from the Base64 String let generatedFile = Util.convertDataURLtoFile(resp); // Block Screen until Server responded $("#loading-screen").toggleClass("show", true); // Send the Request and handle it Util.sendFilePostRequest(generatedFile, "/api/img_scan", function(responseText) { let response = JSON.parse(responseText); if (response.code !== undefined) { // JSON doesnt have a code when the text is given Util.showMessage("error", response.message); $("#loading-screen").toggleClass("show", false); } else { if (response.text.length == 1 && response.text.match(kanjiRegEx)) { Util.loadUrl(JotoTools.createUrl(response.text, 1)); } else { Util.loadUrl(JotoTools.createUrl(response.text)); } 
} }); }); resetUploadUrlInput(); } // Loads the Image Cropper function initCroppie(inputUrl) { cropTarget = $('#croppingTarget').croppie({ showZoomer: false, enableResize: true, enableOrientation: true, mouseWheelZoom: 'ctrl' }); cropTarget.croppie('bind', { url: inputUrl, }); cropTarget.croppie('result', 'html').then(function(html) { }); } // Custom Modal Toggle function for the custom Modal var modalIsVisible = false; function toggleCroppingModal() { if (modalIsVisible) { $(".modal-backdrop").remove(); $("#imageCroppingModal").css("display", "none"); } else { $("body").append(''); $("#imageCroppingModal").css("display", "block"); } modalIsVisible = !modalIsVisible; $("#imageCroppingModal").modal(); $("#imageCroppingModal").toggleClass("show"); } ================================================ FILE: html/assets/js/search/overlay/radicalSearch.js ================================================ /** * Used to handle the radical search */ const radicals = [ ["一", "|", "丶", "ノ", "乙", "亅"], ["二", "亠", "人", "⺅", "𠆢", "儿", "入", "ハ", "丷", "冂", "冖", "冫", "几", "凵", "刀", "⺉", "力", "勹", "匕", "匚", "十", "卜", "卩", "厂", "厶", "又", "マ", "九", "ユ", "乃", "𠂉"], ["⻌", "口", "囗", "土", "士", "夂", "夕", "大", "女", "子", "宀", "寸", "小", "⺌", "尢", "尸", "屮", "山", "川", "巛", "工", "已", "巾", "干", "幺", "广", "廴", "廾", "弋", "弓", "ヨ", "彑", "彡", "彳", "⺖", "⺘", "⺡", "⺨", "⺾", "⻏", "⻖", "也", "亡", "及", "久"], ["⺹", "心", "戈", "戸", "手", "支", "攵", "文", "斗", "斤", "方", "无", "日", "曰", "月", "木", "欠", "止", "歹", "殳", "比", "毛", "氏", "气", "水", "火", "⺣", "爪", "父", "爻", "爿", "片", "牛", "犬", "⺭", "王", "元", "井", "勿", "尤", "五", "屯", "巴", "毋"], ["玄", "瓦", "甘", "生", "用", "田", "疋", "疒", "癶", "白", "皮", "皿", "目", "矛", "矢", "石", "示", "禸", "禾", "穴", "立", "⻂", "世", "巨", "冊", "母", "⺲", "牙"], ["瓜", "竹", "米", "糸", "缶", "羊", "羽", "而", "耒", "耳", "聿", "肉", "自", "至", "臼", "舌", "舟", "艮", "色", "虍", "虫", "血", "行", "衣", "西"], ["臣", "見", "角", "言", "谷", "豆", "豕", "豸", "貝", "赤", "走", "足", "身", "車", "辛", "辰", "酉", "釆", "里", "舛", "麦"], ["金", "長", 
"門", "隶", "隹", "雨", "青", "非", "奄", "岡", "免", "斉"], ["面", "革", "韭", "音", "頁", "風", "飛", "食", "首", "香", "品"], ["馬", "骨", "高", "髟", "鬥", "鬯", "鬲", "鬼", "竜", "韋"], ["魚", "鳥", "鹵", "鹿", "麻", "亀", "啇", "黄", "黒"], ["黍", "黹", "無", "歯"], ["黽", "鼎", "鼓", "鼠"], ["鼻", "齊"], ["龠"] ]; var radicalMask = [ [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0], [0] ]; var baseRadResult; var currentSearchInput; var lastRadicalSearchResult; Util.awaitDocumentReady(() => { baseRadResult = $('.rad-results')[0].innerHTML; loadRadicals(0); // Used to re-focus searchbar upon using Radical Btns $("#kanji-search").focus(e => { currentSearchInput = $("#kanji-search"); }); $("#search").focus(e => { currentSearchInput = $("#search"); }); }); // Opens | Closes the Radical overlay function toggleRadicalOverlay() { closeAllSubSearchbarOverlays("radical"); let overlay = $('.overlay.radical'); overlay.toggleClass('hidden'); sContainer.parentElement.classList.add("hidden"); // Reset on close if (overlay.hasClass("hidden")) { resetRadPicker() rContainer.classList.add("hidden"); Suggestions.overlay.show(); } else { $('.rad-results').html(baseRadResult); $('.rad-results').removeClass("hidden"); Suggestions.updateSuggestions(); scrollSearchIntoView(); $('#kanji-search').focus(); } } // 
Called by reset btn. Deselects all function resetRadPicker() { $('.rad-btn.selected').each((i, e) => { $(e).removeClass("selected"); }); $('.rad-btn.disabled').each((i, e) => { $(e).removeClass("disabled"); }); iterateMaskAsync((i, j) => { radicalMask[i][j] = 0; }); $('.rad-results').html(baseRadResult); resetAllTabs(); currentSearchInput.focus(); } // Adds the selected Kanji to the search bar function handleKanjiSelect(event) { // Insert Kanji in search bar $('#search').val($('#search').val() + event.target.innerHTML); // Update search bar Suggestions.updateSuggestions(); toggleSearchIcon(200); // Focus the last search bar currentSearchInput.focus(); } // Toggles Radicals on Input and loads the results function handleRadicalSelect(event) { let target = $(event.target); // Dont do anything if disabled if (target.hasClass("disabled")) { return; } // Update Radical Map if (target.hasClass("selected")) { radicalMask[target.attr("index")][target.attr("position")] = 0; } else { radicalMask[target.attr("index")][target.attr("position")] = 1; } // Make results visible again if they were hidden $('.rad-results').removeClass("hidden"); // Toggle the "selected" class target.toggleClass('selected'); // Get possible Kanji / Radicals from selection getRadicalInfo(); // Focus the last search bar currentSearchInput.focus(); // Update search bar Suggestions.updateSuggestions(getSelectedRadicalArray()); } // Opens the Radical Page at the given index let lastRadicalPage; function openRadicalPage(index) { // Handle special pages if (index == -1) { if (lastRadicalPage !== undefined) { index = lastRadicalPage; } else { openRadicalPage(0); return; } } // Iterate buttons and update whether to hightlight them or not $(".rad-page-toggle > span").each((i, e) => { if (i == index) { e.classList.add("selected"); lastRadicalPage = index; } else e.classList.remove("selected"); }); // Load Radicals of new page loadRadicals(index); // Focus the last search bar currentSearchInput.focus(); } // 
Clears the shown Radical list function clearRadicals() { // Clear Radicals $(".rad-btn.picker:not(.num)").each((i, e) => { if (e.classList.contains("selected")) { radicalMask[e.getAttribute("index")][e.getAttribute("position")] = 1; } }); $(".rad-picker").html(""); } // Loads the Radicals of the specific tab function loadRadicals(tabIndex) { // Clear Radicals clearRadicals(); // Add Radicals if (tabIndex == 0) { addRadicals(0); addRadicals(1); } else if (tabIndex == 9) { for (let i = 10; i < radicals.length; i++) { addRadicals(i); } } else if (tabIndex == 10) { loadRadicalSearchResults(lastRadicalSearchResult); } else { addRadicals(tabIndex+1); } } // Loads the given Array into the Select Radicals Tab function addRadicals(arrayIndex) { let html = $(".rad-picker").html(); html += ''+(arrayIndex+1)+''; for (let i = 0; i < radicals[arrayIndex].length; i++) { html += ''+radicals[arrayIndex][i]+''; } $(".rad-picker").html(html); } // Appends radicals contained in an array function addRadicalsFromArray(index, array) { let html = $(".rad-picker").html(); html += ''+index+''; index -= 1; for (let a = 0; a < array.length; a++) { for (let j = 0; j < radicals[index].length; j++) { if (radicals[index][j] == array[a]) { html += ''+radicals[index][j]+''; } } } $(".rad-picker").html(html); } // Loads Kanji / Radical result from API into frontend function loadRadicalResults(info) { var rrHtml = ""; // Get and Iterate Kanji Keys let kanjiKeys = Object.keys(info.kanji) // Iterate all and add for (let i = 0; i < kanjiKeys.length; i++) { // Get the data let key = kanjiKeys[i]; let possibleKanji = info.kanji[key]; // Create the stroke-count btn rrHtml += '' + key + ''; let kanjiBtns = ""; // Create the btn for each entry for (let j = 0; j < possibleKanji.length; j++) { kanjiBtns += '' + possibleKanji[j] + ''; } rrHtml += kanjiBtns; } $('.rad-results').html(rrHtml); // Only activate possible radicals let currentRadicals = $('.rad-btn.picker:not(.num)').toArray(); for (let i = 0; i < 
currentRadicals.length; i++) { let rad = $(currentRadicals[i]); if (info.possible_radicals.includes(rad.html()) || rad.hasClass("selected")) { rad.removeClass("disabled"); } else { rad.addClass("disabled"); } } // Apply changes to mask iterateMaskAsync((i, j) => { if (!info.possible_radicals.includes(radicals[i][j])) { radicalMask[i][j] = -1; } else if (radicalMask[i][j] == -1) { radicalMask[i][j] = 0; } }); } // Calls the given function on every iteration of the array. Passes i (outer) and j (inner) as params. function iterateMaskAsync(functionToCall, startIndex, endIndex) { if (startIndex == undefined) { let middle = Math.floor(radicals.length / 2); iterateMaskAsync(functionToCall, middle, radicals.length); startIndex = 0; endIndex = middle; } for (let i = startIndex; i < endIndex; i++) { for (let j = 0; j < radicals[i].length; j++) { functionToCall(i, j); } } updateTabVisuals(); } // Checks whether the Page-Tabs have to be colored in a specific way (None possible, element selected...) async function updateTabVisuals() { for (let i = 0; i < 10; i++) { let tabStatus = -1; // First Tab if (i == 0) { tabStatus = checkRadicalsInTab(0); let tabStatus2 = checkRadicalsInTab(1); if (tabStatus2 == 0 && tabStatus == -1) tabStatus = 0; else if (tabStatus2 == 1) tabStatus = 1; } // Last Tab else if (i == 9) { for (let j = 10; j < radicals.length; j++) { tabStatus = checkRadicalsInTab(j); $("#r-t"+j).toggleClass("disabled", tabStatus == -1); $("#r-t"+j).toggleClass("highlighted", tabStatus == 1); } break; } // Any other Tab else { tabStatus = checkRadicalsInTab(i+1); } $("#r-t"+i).toggleClass("disabled", tabStatus == -1); $("#r-t"+i).toggleClass("highlighted", tabStatus == 1); } } // Called by updateTabVisuals. 
Checks for tabDisabled (-1) | normal (0) | highlighted (1)
// Scans one radical group's mask entries: any 1 wins (highlighted), otherwise
// any 0 yields normal; only -1 entries (all impossible) leave the tab disabled.
function checkRadicalsInTab(arrayIndex) {
    let status = -1;
    for (let i = 0; i < radicals[arrayIndex].length; i++) {
        if (radicalMask[arrayIndex][i] == 0) {
            status = 0;
        } else if (radicalMask[arrayIndex][i] == 1) {
            status = 1;
            break;
        }
    }
    return status;
}

// Resets all Radical-Tabs by removing class-modifiers
function resetAllTabs() {
    for (let i = 0; i < 10; i++) {
        $("#r-t"+i).removeClass("disabled");
        $("#r-t"+i).removeClass("highlighted");
    }
}

// Resets all Radical-Tabs by removing class-modifiers including the selected tab
function closeAllTabs() {
    for (let i = 0; i < 10; i++) {
        $("#r-t"+i).removeClass("disabled");
        $("#r-t"+i).removeClass("selected");
    }
}

// Returns an array only containing selected radicals (mask value == 1)
function getSelectedRadicalArray() {
    let arr = [];
    // Populate radicals within JSON with all selected radicals
    for (let i = 0; i < radicalMask.length; i++) {
        for (let j = 0; j < radicalMask[i].length; j++) {
            if (radicalMask[i][j] == 1) {
                arr.push(radicals[i][j]);
            }
        }
    }
    return arr;
}

// Calls the API to get all kanji and radicals that are still possible
// given the current selection; updates the result list and tab visuals.
function getRadicalInfo() {
    // Create the JSON
    let radicalJSON = {
        "radicals": getSelectedRadicalArray()
    }

    // No Radicals selected: re-enable everything, clear the -1 (impossible) marks and reset
    if (radicalJSON.radicals.length == 0) {
        $('.rad-btn.disabled').each((i, e) => {
            $(e).removeClass("disabled");
        });
        iterateMaskAsync((i, j) => {
            if (radicalMask[i][j] == -1) {
                radicalMask[i][j] = 0;
            }
        });
        resetAllTabs();
        return;
    }

    // Send Request to backend
    $.ajax({
        type: "POST",
        url: "/api/kanji/by_radical",
        data: JSON.stringify(radicalJSON),
        headers: {
            'Content-Type': 'application/json'
        },
        success: function (result) {
            // Load the results into frontend
            loadRadicalResults(result);
        },
        error: function (result) {
            // Print Error
            Util.showMessage("error", getText("RADICAL_API_UNREACHABLE"))
        }
    });
}

// Calls the API to get input suggestions
var lastRadRequest;
function getRadicalSearchResults() {
    // Get value for the input
    let 
query = $("#kanji-search").val(); if (query.length == 0) { return; } // Create the JSON let inputJSON = { "query": query } // Abort any requests sent earlier if (lastRadRequest !== undefined) { lastRadRequest.abort(); } // Send Request to backend lastRadRequest = $.ajax({ type : "POST", url : "/api/radical/search", data: JSON.stringify(inputJSON), headers: { 'Content-Type': 'application/json' }, success : function(result) { console.log(result); // Load the results into frontend loadRadicalSearchResults(result); lastRadicalSearchResult = result; }, error : function(result) { $("#r-tc").removeClass("show"); $("#r-tc").removeClass("selected"); } }); } // Visualizes the results of getRadicalSearchResults function loadRadicalSearchResults(results) { let firstFound = false; for (let i = 1; i <= 15; i++) { if (results.radicals[i] !== undefined) { if (!firstFound) { firstFound = true; clearRadicals(); closeAllTabs(); $("#r-tc").addClass("show"); $("#r-tc").addClass("selected"); } addRadicalsFromArray(i, results.radicals[i]); } } if (!firstFound) { $("#r-tc").removeClass("show") openRadicalPage(-1); } } ================================================ FILE: html/assets/js/search/overlay/speechSearch.js ================================================ /** * This JS-File implements the Speech to Text functionality for text input */ var SpeechRecognition, recognition; try { SpeechRecognition = SpeechRecognition || webkitSpeechRecognition; recognition = new SpeechRecognition(); recognitionSetup(); } catch (e) {} // Handles the initial setup of the recognition lib function recognitionSetup() { recognition.lang = 'en-US'; recognition.continuous = false; recognition.interimResults = false; recognition.maxAlternatives = 1; // On recognition start recognition.onstart = function() { $('#currentlyListening').html(getText("SPEECH_LISTEN_YES")); $('.voiceSvg').toggleClass("active"); }; // On recognition error recognition.onerror = function(event) { console.log(event.error); 
switch(event.error) { case "not-allowed": Util.showMessage("error", getText("SPEECH_NO_PERMISSION")); break; case "aborted": Util.showMessage("info", getText("SPEECH_ABORT")); break; case "no-speech": Util.showMessage("info", getText("SPEECH_NO_VOICE")); break; default: Util.showMessage("error", getText("SPEECH_NOT_SUPPORTED")); } $('#currentlyListening').html(getText("SPEECH_LISTEN_NO")); $('.voiceSvg').toggleClass("active"); } // On speech end recognition.onspeechend = function() { recognition.stop(); $('#currentlyListening').html(getText("SPEECH_LISTEN_NO")); $('.voiceSvg').toggleClass("active"); } // On recognition result recognition.onresult = function(event) { let transcript = event.results[0][0].transcript; $('#search').val(transcript); }; } // Toggles the overlay on and off function toggleSpeakOverlay() { if (recognition == undefined) { Util.showMessage("error", getText("SPEECH_NOT_SUPPORTED")); return; } closeAllSubSearchbarOverlays("speech"); let overlay = $('.overlay.speech'); overlay.toggleClass('hidden'); if (overlay.hasClass("hidden")) { recognition.abort(); recognition.stop(); } } // Activate the given language for speech recognition TODO save in cookie function setRecognitionLang(lang) { recognition.abort(); switch(lang) { case "jap": recognition.lang = "ja"; $('#currentSpeechLang').html(getText("LANG_JAP")); break case "ger": recognition.lang = "de-DE"; $('#currentSpeechLang').html(getText("LANG_GER")); break case "eng": recognition.lang = "en-US"; $('#currentSpeechLang').html(getText("LANG_ENG")); break case "rus": recognition.lang = "ru"; $('#currentSpeechLang').html(getText("LANG_RUS")); break case "spa": recognition.lang = "es-ES"; $('#currentSpeechLang').html(getText("LANG_SPA")); break case "swe": recognition.lang = "sv-SE"; $('#currentSpeechLang').html(getText("LANG_SWE")); break case "fre": recognition.lang = "fr-FR"; $('#currentSpeechLang').html(getText("LANG_FRE")); break case "dut": recognition.lang = "nl-NL"; 
$('#currentSpeechLang').html(getText("LANG_DUT")); break case "hun": recognition.lang = "hu"; $('#currentSpeechLang').html(getText("LANG_HUN")); break case "slv": recognition.lang = "sl-SI"; $('#currentSpeechLang').html(getText("LANG_SLV")); break } setTimeout(function(){ recognition.start(); }, 400); } ================================================ FILE: html/assets/js/search/overlay/suggestionOverlay.js ================================================ /* * Handles functions related to the suggestion Overlay. Load before search.js! */ Suggestions.overlay = function () {}; // Shows the suggestions overlay Suggestions.overlay.show = function() { if (availableSuggestions > 0 && input.value.length > 0) { sContainer.parentElement.classList.remove("hidden"); if (typeof scrollSearchIntoView === "function") { scrollSearchIntoView(); } } else { sContainer.parentElement.classList.add("hidden"); } } // Searches for the currently selected suggestion Suggestions.overlay.activateSelection = function() { $("#suggestion-container > .search-suggestion")[currentSuggestionIndex-1].click(); } // Selects the suggestion at the index above (-1) or beneath (1) Suggestions.overlay.changeSuggestionIndex = function(direction) { // Remove highlight from last suggestion if (currentSuggestionIndex != 0) { $("#suggestion-container > .search-suggestion")[currentSuggestionIndex-1].classList.remove("selected"); } // Calculate new suggestion index currentSuggestionIndex = Math.positiveMod(currentSuggestionIndex + direction, availableSuggestions + 1); // Set new highlight if (currentSuggestionIndex != 0) { // Get current suggestion let suggestion = $("#suggestion-container > .search-suggestion")[currentSuggestionIndex-1]; let s_children = suggestion.children; // Add Furigana. If Kanji are used, select the secondary suggestion. 
If user types kanji, show him kanji instead if (s_children[1].innerHTML.length > 0 && input.value.match(kanjiRegEx) === null) { currentSuggestion = s_children[1].innerHTML.substring(1, s_children[1].innerHTML.length - 1); } else { currentSuggestion = s_children[0].innerHTML; } // Mark the suggestion's row suggestion.classList.add("selected"); } else { currentSuggestion = ""; } // Update shadow text setShadowText(); } ================================================ FILE: html/assets/js/search/search.js ================================================ /** * This JS-File contains functions handling the website search (e.g. Search suggestions) */ // Prepare Search / Voice Icon when loading the page Util.awaitDocumentReady(() => { toggleSearchIcon(0); }); // Shows the Voice / Search Icon when possible function toggleSearchIcon(duration) { if (document.getElementById("search").value.length == 0) { $('#searchBtn.search-embedded-btn').hide(duration); $('#voiceBtn.search-embedded-btn').show(duration); } else { $('#searchBtn.search-embedded-btn').show(duration); $('#voiceBtn.search-embedded-btn').hide(duration); } } // Resets the value of the search input function emptySearchInput() { $('#search').val(""); $('#search').focus(); toggleSearchIcon(200); } // Returns the substring of what the user already typed for the current suggestion // If target is not empty, the substring of target will be searched instead function getCurrentSubstring(target) { let currentSubstr = ""; let foundSubstr = false; if (target === undefined) { target = currentSuggestion; } for (let i = target.length; i > 0; i--) { currentSubstr = target.substring(0, i).toLowerCase(); let index = input.value.toLowerCase().lastIndexOf(currentSubstr) if (index == -1) { continue; } if (index + currentSubstr.length === input.value.length) { foundSubstr = true; break; } } return foundSubstr ? 
currentSubstr : "";
}

// Interrupts the form's submit and makes the user visit the correct page
function onSearchStart() {
    var search_value = $('#search').val();
    var search_type = $('#search-type').val();

    // Report the search to plausible analytics when the script is loaded
    if (window.plausible) {
        plausible('search', {props: {query: search_value, origin: location.pathname, language: Settings.language.searchLang.val}});
    }

    // Empty input navigates to the start page, otherwise to the search URL
    if (search_value.length == 0) {
        Util.loadUrl(JotoTools.createUrl());
    } else {
        Util.loadUrl(JotoTools.createUrl(search_value, search_type));
    }

    // Suppress the native form submission
    return false;
}

// When opening an overlay, scroll it into view
function scrollSearchIntoView() {
    // Only scroll when we are on the index page
    if (document.location.origin+"/" === document.location.href) {
        var top = $('#search').offset().top;
        Util.scrollTo(top, 500);
    }
}

// Closes all overlays connected to the search bar
function closeAllSubSearchbarOverlays(overlayToIgnore) {
    if (overlayToIgnore !== "speech")
        $('.overlay.speech').addClass('hidden');
    if (overlayToIgnore !== "radical")
        $('.overlay.radical').addClass('hidden');
    if (overlayToIgnore !== "image")
        $('.overlay.image').addClass('hidden');
}

// Opens the Help Page
function openHelpPage() {
    // Remove the "new" badge and remember that the user has visited
    document.getElementsByClassName("infoBtn")[0].classList.remove("new");
    Settings.alterOther("firstVisit", false, );
    Util.loadUrl("/help");
}

// Handles clicks on the home button (1 = left click: same tab, 2 = middle click: new tab)
function onHomeClick(event) {
    event.preventDefault();
    switch (event.which) {
        case 1:
            location.href = location.origin;
            break;
        case 2:
            window.open(location.origin, "_blank");
            break;
        default:
            break;
    }
}

================================================
FILE: html/assets/js/search/shared.js
================================================

/**
 * This JS-File contains variables shared between files to improve performance
 */

// NOTE(review): inside a character class "|" is matched literally, so this also
// matches a plain "|" character — presumably unintended; verify before changing.
const kanjiRegEx = '([一-龯|々|𥝱|𩺊])';

// All hashtags offered by the #-suggestion dropdown
const hashtags = [
    "#adverb", "#auxilary", "#conjunction", "#noun", "#prefix", "#suffix", "#particle",
    "#sfx", "#verb", "#adjective", "#counter", "#expression", "#interjection", "#pronoun",
    "#numeric", "#transitive", "#intransitive", "#unclassified", "#word", "#sentence",
    "#name", "#kanji",
"#abbreviation","#katakana", "#N5", "#N4", "#N3", "#N2", "#N1", "#JLPT5", "#JLPT4", "#JLPT3", "#JLPT2", "#JLPT1", "#hidden", "#Irregular-Ichidan", "#Abbreviation", "#Archaism", "#ChildrensLanguage", "#Colloquialism", "#Dated", "#Derogatory", "#Familiarlanguage", "#Femaleterm", "#Honorific", "#Humblelanguage", "#Idomatic", "#Legend", "#Formal", "#MangaSlang", "#Maleterm", "#InternetSlang", "#Obsolete", "#Obscure", "#Onomatopoeic", "#PersonName", "#Placename", "#Poeticalterm", "#PoliteLanguage", "#Proverb", "#Quotation", "#Rare", "#Religion", "#Sensitive", "#Slang", "#UsuallyKana", "#Vulgar", "#Artwork", "#Yojijukugo", ]; var currentSuggestion = ""; var currentSuggestionIndex = 0; // 0 => nothing var availableSuggestions = 0; var preventNextApiCall = false; var input, searchRow, shadowText, sContainer, rContainer; Util.awaitDocumentInteractive(() => { input = document.getElementById("search"); searchRow = document.getElementById("search-row"); shadowText = document.getElementById("shadow-text"); sContainer = document.getElementById("suggestion-container"); rContainer = document.getElementById("suggestion-container-rad"); }); ================================================ FILE: html/assets/js/search/suggestions.js ================================================ function Suggestions() {}; /** * Updates the suggestions help and respects selected radicals if given some * * @param radicalArray {[]} containing radicals that need to be contained in searched kanji */ Suggestions.updateSuggestions = function(radicalArray) { // Tooltips for # - searches let lastWord = Util.getLastWordOfString(input.value); if (lastWord.includes("#")) { API.getHashtagData(lastWord, loadSuggestionApiData); // Tooltips for everything else } else if (input.value.length > 0) { API.getSuggestionApiData(radicalArray, loadSuggestionApiData, removeSuggestions); // Remove suggestions if the input is empty } else { removeSuggestions(); } // Set shadow text setShadowText(); } // Sets the shadow's text 
whenever possible function setShadowText() { // If input is overflown, dont show text if (Util.checkOverflow(shadowText) && shadowText.innerHTML != "") { shadowText.innerHTML = ""; return } // Make invisible temporarily shadowText.style.opacity = 0; // Check how much of suggestion is typed already let currentSubstr = getCurrentSubstring(); // Add missing suggestion to shadow text if (currentSubstr.length > 0) { shadowText.innerHTML = input.value + currentSuggestion.substring(currentSubstr.length); } else { shadowText.innerHTML = ""; } // If it would overflow with new text, don't show if (Util.checkOverflow(shadowText)) { shadowText.innerHTML = ""; } // Make visible again shadowText.style.opacity = 0.4; } // Called only by [getSuggestionApiData]. Loads data called from the API into the frontend function loadSuggestionApiData(result) { // Remove current suggestions removeSuggestions(); // Return if no suggestions were found if (result.suggestions.length == 0) { // Prevent future requests if no result was found and input was > 8 chars if (input.value >= 8) { API.suggestionStop = input.value.length; } // Return return; } else { // Show Suggestions Containers if ($(".overlay.radical").hasClass("hidden")) { sContainer.parentElement.classList.remove("hidden"); } else { rContainer.classList.remove("hidden"); } } // Set suggestion type currentSuggestionType = result.suggestion_type; // Set the amount of possible suggestions availableSuggestions = result.suggestions.length; if (availableSuggestions > 10) { availableSuggestions = 10; } // Add suggestions for (let i = 0; i < availableSuggestions; i++) { // Result variables let primaryResult = ""; let secondaryResult = ""; // Only one result if (result.suggestions[i].secondary === undefined) { primaryResult = result.suggestions[i].primary; } // Two results, kanji needs to be in the first position here else { primaryResult = result.suggestions[i].secondary; secondaryResult = "(" + result.suggestions[i].primary + ")"; } // Get 
target page var currentPage = JotoTools.getCurrentSearchType(); // Generate the /search/ let searchValue = ""; switch (currentSuggestionType) { case "kanji_reading": searchValue = encodeURIComponent(primaryResult) + " " + encodeURIComponent(result.suggestions[i].primary); break; case "hashtag": let s = input.value.split(" "); searchValue = encodeURIComponent(s.slice(0, s.length-1).join(" ")) + " " + encodeURIComponent(primaryResult); break; default: searchValue = encodeURIComponent(primaryResult); } // Add to Page sContainer.innerHTML += ` ` + ' '+primaryResult+' ' + ' '+secondaryResult+' ' + ' '; rContainer.innerHTML += ` ` + ' '+primaryResult+' ' + ' '; } } // Removes all current suggestions including shadowText function removeSuggestions() { sContainer.innerHTML = ""; rContainer.innerHTML = ""; shadowText.innerHTML = ""; currentSuggestion = ""; currentSuggestionIndex = 0; availableSuggestions = 0; sContainer.parentElement.classList.add("hidden"); rContainer.classList.add("hidden"); } ================================================ FILE: html/assets/js/tools/jotoTools.js ================================================ /* * Collection-File like utils.js but that are made specifically for Jotoba */ // The JotoTools "parent" function JotoTools () {}; // Creates a Jotoba-Search URL using the given parameters JotoTools.createUrl = function(searchText, searchType, targetPage, languageCode, sentenceIndex) { let url = window.location.origin; let hasQ = false; if (searchText !== undefined) { url += "/search/" + encodeURIComponent(searchText); } if (searchType !== undefined) { url += "?t=" + searchType; hasQ = true; } if (targetPage !== undefined) { url += (!hasQ ? "?p=" : "&p=") + targetPage; hasQ = true; } if (languageCode !== undefined) { url += (!hasQ ? "?l=" :"&l=") + languageCode; hasQ = true; } else { url = Util.addPageParameterIfNotNull(url, "l", !hasQ); } if (sentenceIndex !== undefined) { url += (!hasQ ? 
"?i=" :"&i=") + sentenceIndex; hasQ = true; } else { url = Util.addPageParameterIfNotNull(url, "i", !hasQ); } return url; } // Takes a link path starting with / and appends it to the Joto-URL (https://jotoba.de {/path}) JotoTools.pathToUrl = function(path) { return window.location.origin + path; } // Creates a Jotoba URL for the given page JotoTools.getPageUrl = function(pageName) { let url = window.location.origin; url += "/" + pageName; return url; } // Returns the currently searched string JotoTools.getCurrentSearch = function() { return document.location.pathname.split("/")[2]; } // Returns the value of the current Search [Words, Sentence...] JotoTools.getCurrentSearchType = function() { return $('#search-type').val(); } // Parses a language code into the Joto needs JotoTools.toJotobaLanguage = function(code) { code = code.toLowerCase().substr(0, 2); switch (code) { case "en": code = "en-US"; break; case "sv": code = "sv-SE"; break; case "ru": code = "ru"; break; case "hu": code = "hu"; break; default: code += "-"+code.toUpperCase(); if (!JotoTools.isSupportedSearchLang(code)) code = "en-US"; } return code; } // Checks if a given language code is supported as a search lang JotoTools.isSupportedSearchLang = function(code) { switch (code) { case "en-US": case "de-DE": case "es-ES": case "fr-FR": case "nl-NL": case "sv-SE": case "ru": case "hu": case "sl-SI": return true; default: return false; } } // Copies the given text and echoes the given Message JotoTools.copyTextAndEcho = function(text, messageID) { Util.copyToClipboard(text); Util.showMessage("success", getText(messageID)); } ================================================ FILE: html/assets/js/tools/ripple.js ================================================ !function(a,b,c){a.ripple=function(d,e){var 
f=this,g=f.log=function(){f.defaults.debug&&console&&console.log&&console.log.apply(console,arguments)};f.selector=d,f.defaults={debug:!1,on:"mousedown",opacity:.4,color:"auto",multi:!1,duration:.7,rate:function(a){return a},easing:"linear"},f.defaults=a.extend({},f.defaults,e);var h=function(b){var d,e,h=a(this);if(h.addClass("has-ripple"),e=a.extend({},f.defaults,h.data()),e.multi||!e.multi&&0===h.find(".ripple-a").length){if(d=a("").addClass("ripple-a"),d.appendTo(h),g("Create: Ripple"),!d.height()&&!d.width()){var i=c.max(h.outerWidth(),h.outerHeight());d.css({height:i,width:i}),g("Set: Ripple size")}if(e.rate&&"function"==typeof e.rate){var j=c.round(d.width()/e.duration),k=e.rate(j),l=d.width()/k;e.duration.toFixed(2)!==l.toFixed(2)&&(g("Update: Ripple Duration",{from:e.duration,to:l}),e.duration=l)}var m="auto"==e.color?h.css("color"):e.color,n={animationDuration:e.duration.toString()+"s",animationTimingFunction:e.easing,background:m,opacity:e.opacity};g("Set: Ripple CSS",n),d.css(n)}e.multi||(g("Set: Ripple Element"),d=h.find(".ripple-a")),g("Destroy: Ripple Animation"),d.removeClass("ripple-animate");var o=b.pageX-h.offset().left-d.width()/2,p=b.pageY-h.offset().top-d.height()/2;e.multi&&(g("Set: Ripple animationend event"),d.one("animationend webkitAnimationEnd oanimationend MSAnimationEnd",function(){g("Note: Ripple animation ended"),g("Destroy: Ripple"),a(this).remove()})),g("Set: Ripple location"),g("Set: Ripple animation"),d.css({top:p+"px",left:o+"px"}).addClass("ripple-animate")};a(b).on(f.defaults.on,f.selector,h)}}(jQuery,document,Math);$.ripple.version = "1.2.1"; $.ripple(".ripple", { debug: false, // Turn Ripple.js logging on/off on: 'mousedown', // The event to trigger a ripple effect opacity: 0.4, // The opacity of the ripple color: "auto", // Set the background color. 
If set to "auto", it will use the text color multi: false, // Allow multiple ripples per element duration: 0.7, // The duration of the ripple // Filter function for modifying the speed of the ripple rate: function(pxPerSecond) { return pxPerSecond; }, easing: 'linear' // The CSS3 easing function of the ripple }); ================================================ FILE: html/assets/js/tools/service-worker.js ================================================ // Currently unused. self.addEventListener('install', event => { }); self.addEventListener('activate', event => { }); self.addEventListener('fetch', event => { }); ================================================ FILE: html/assets/js/tools/theme.js ================================================ const themeEvent = new Event("theme-changed"); // Sets the color Theme to the given Value by passing a class to the :root element const setTheme = (theme) => { document.documentElement.className = theme; localStorage.setItem('theme', theme); Util.setMdlCheckboxState("use_dark_mode_settings", theme === "dark") document.dispatchEvent(themeEvent); } // Updates theme when changed by another tab (or console) window.addEventListener("storage", () => { let targetTheme = localStorage.getItem("theme"); if (targetTheme) { setTheme(targetTheme); } }) // Set theme from localStorage (if set) if (localStorage.getItem('theme')) { setTheme(localStorage.getItem('theme')); } // Else, set based on prefered color scheme else { Util.awaitDocumentReady(() => { window.matchMedia("(prefers-color-scheme: dark)").matches ? setTheme("dark") : setTheme("light"); }); } // listen for prefers-color-scheme changes window.matchMedia("(prefers-color-scheme: dark)").addEventListener( "change", e => setTheme(e.matches ? 
"dark" : "light")
);

================================================
FILE: html/assets/js/tools/utils.js
================================================

/**
 * This JS-File contains some functions that are commonly used
 */

// Constants
// Display options for Util.toLocaleDateString
const dateSettings = { year: 'numeric', month: 'short', day: 'numeric' };

// The util "parent"
function Util () {};

// Runs callback fn when document is done loading DOM-Elements
// Polls document.readyState every 10 ms until "interactive" or "complete".
Util.awaitDocumentInteractive = function(callback) {
    let readyWait = window.setInterval(() => {
        if (document.readyState == "interactive" || document.readyState == "complete") {
            callback();
            window.clearInterval(readyWait);
        }
    }, 10);
}

// Runs callback fn when document is done loading
// Polls document.readyState every 10 ms until "complete".
Util.awaitDocumentReady = function(callback) {
    let readyWait = window.setInterval(() => {
        if (document.readyState == "complete") {
            callback();
            window.clearInterval(readyWait);
        }
    }, 10);
}

// Loads a script dynamically
// url: script source, async: set the async attribute,
// attributes: array of [name, value] pairs, callback: onload handler
Util.loadScript = function(url, async, attributes, callback) {
    // Called without url? Return.
    if (url.length == 0) {
        return;
    }

    // Create the element
    var s = document.createElement('script');
    s.setAttribute('src', url);
    s.onload = callback;
    if (async) {
        s.async = true;
    }

    // Add specific attributes
    for (let i = 0; i < attributes.length; i++) {
        s.setAttribute(attributes[i][0], attributes[i][1]);
    }

    // Append and load
    document.head.appendChild(s);
}

// Checks if a given element is overflown
// Temporarily forces overflow:hidden so scroll sizes can be compared reliably,
// then restores the element's original overflow style.
Util.checkOverflow = function(el) {
    var curOverflow = el.style.overflow;
    if (!curOverflow || curOverflow === "visible")
        el.style.overflow = "hidden";
    var isOverflowing = el.clientWidth < el.scrollWidth || el.clientHeight < el.scrollHeight;
    el.style.overflow = curOverflow;
    return isOverflowing;
}

// Re-Encodes a decoded HTML
// Parses the string as HTML and returns its text content (entities resolved).
Util.decodeHtml = function(html) {
    var doc = new DOMParser().parseFromString(html, "text/html");
    return doc.documentElement.textContent;
}

// Changes the state of an MDL checkbox
Util.setMdlCheckboxState = function(id, state) {
    if (state === undefined) {
        return;
    }
    let element = $('label[for='+id+']');
    // Only attempt to apply change if element exists.
    if (element[0]){
        if(state) {
            element[0].MaterialCheckbox.check();
        } else {
            element[0].MaterialCheckbox.uncheck();
        }
    }
}

// Parses the given Unix time to a date of the given language
// NOTE(review): locale is hard-coded to "de-DE" despite the comment — verify intent.
Util.toLocaleDateString = function(unixTime) {
    return new Date(unixTime).toLocaleDateString("de-DE", dateSettings);
}

// Returns whether the current page is index or not
Util.isIndexPage = function() {
    return window.location.origin+"/" == document.location.href;
}

// Returns whether the current page is listed under {index}/{path}
Util.isInPath = function(path) {
    return document.location.href.startsWith(window.location.origin+"/"+path);
}

================================================
FILE: html/assets/js/tools/utils2.js
================================================

/**
 * This JS-File contains some functions that are commonly used
 * This file is supposed to be loaded asynchronously.
Jotoba needs some things directly so they are located in a different file.
 */

// Displays the given message of type "succes", "error" or "info"
Util.showMessage = function(type, message) {
    switch (type) {
        case "success":
            alertify.success(message);
            break;
        case "error":
            alertify.error(message);
            break;
        case "info":
            // NOTE(review): "info" is rendered via alertify.warning — confirm intended
            alertify.warning(message);
    }
}

// Copies the given string to clipboard
// Uses a hidden off-screen textarea together with execCommand('copy').
Util.copyToClipboard = function(text) {
    const el = document.createElement('textarea');
    el.value = text;
    el.setAttribute('readonly', '');
    el.style.position = 'absolute';
    el.style.left = '-9999px';
    document.body.appendChild(el);
    el.select();
    document.execCommand('copy');
    document.body.removeChild(el);
}

// Convert a single 0-F to 0-15
// Digits below 10 are returned unchanged; letters map via the switch.
Util.hex2num_single = function(hex) {
    if (hex < 10) return hex;
    switch(hex.toUpperCase()) {
        case "A": return 10;
        case "B": return 11;
        case "C": return 12;
        case "D": return 13;
        case "E": return 14;
        case "F": return 15;
    }
}

// Convert a single 0-15 to 0-F
// Values below 10 are returned unchanged; 10-15 map to letters.
Util.num2hex_single = function(num) {
    if (num < 10) return num;
    switch(num) {
        case 10: return "A";
        case 11: return "B";
        case 12: return "C";
        case 13: return "D";
        case 14: return "E";
        case 15: return "F";
    }
}

// Returns the browsers true width
// Takes the maximum over the various width metrics browsers disagree on.
Util.getBrowserWidth = function() {
    return Math.max(
        document.body.scrollWidth,
        document.documentElement.scrollWidth,
        document.body.offsetWidth,
        document.documentElement.offsetWidth,
        document.documentElement.clientWidth
    );
}

// Removes any current drag selection (not supported on IE)
Util.deleteSelection = function() {
    if (window.getSelection) {
        var selection = window.getSelection();
        selection.empty();
    }
}

// Scrolls to the destination in x miliseconds
// Animates from the current scroll position to `final` using Math.easeInOutQuad
// and requestAnimationFrame.
Util.scrollTo = function (final, duration) {
    var start = window.scrollY || document.documentElement.scrollTop,
        currentTime = null;

    // rAF callback: eases the scroll position until `duration` ms have elapsed
    var animateScroll = function(timestamp) {
        if (!currentTime) {
            currentTime = timestamp;
        }
        let progress = timestamp - currentTime;
        if(progress > duration) {
            progress = duration;
        }
        let val = Math.easeInOutQuad(progress, start, final-start, duration);
        window.scrollTo(0, val);
        if(progress < duration) {
            window.requestAnimationFrame(animateScroll);
        }
    };

    window.requestAnimationFrame(animateScroll);
};

// Checks if child is contained in parent
// Walks up the DOM tree starting at child's parent.
Util.isChildOf = function (parent, child) {
    var node = child.parentNode;
    while (node != null) {
        if (node == parent) {
            return true;
        }
        node = node.parentNode;
    }
    return false;
}

// Splits the input by " " and returns the last result
Util.getLastWordOfString = function(s) {
    let inputSplit = s.split(" ");
    return inputSplit[inputSplit.length-1];
}

// Converts a Base64 Url to a JS File
// Parses the data-URL header for the mime type, then decodes the payload.
Util.convertDataURLtoFile = function(dataUrl, fileName) {
    var arr = dataUrl.split(','),
        mime = arr[0].match(/:(.*?);/)[1],
        bstr = atob(arr[1]),
        n = bstr.length,
        u8arr = new Uint8Array(n);
    // Copy the decoded bytes into the typed array
    while(n--){
        u8arr[n] = bstr.charCodeAt(n);
    }
    return new File([u8arr], fileName, {type:mime});
}

// Sends a file to the given API endpoint; callback => function
// The file is sent as multipart form data under its own file name.
Util.sendFilePostRequest = function(file, api, callback) {
    var formData = new FormData();
    formData.append(file.name, file);
    var xhr = new XMLHttpRequest();
    xhr.onreadystatechange = function() {
        if (xhr.readyState == XMLHttpRequest.DONE) {
            callback(xhr.responseText);
        }
    }
    xhr.open("POST", api);
    xhr.send(formData);
}

// Checks if a given URL contains an image and call the corresponding callback function
Util.checkUrlIsImage = function(url, successCallback, errorCallback) {
    var image = new Image();
    image.onload = function() {
        if (this.width > 0) {
            successCallback();
        }
    }
    image.onerror = function() {
        errorCallback();
    }
    image.src = url;
}

// Used for animation curves
// Quadratic ease-in/out: t = elapsed, b = start, c = delta, d = duration
Math.easeInOutQuad = function (t, b, c, d) {
    t /= d/2;
    if (t < 1) return c/2*t*t + b;
    t--;
    return -c/2 * (t*(t-2) - 1) + b;
};

// Returns the modulo of n and m but always makes them positive (-6, 4) = 2
Math.positiveMod = function(n, m) {
    return ((n % m) + m) % m;
}

// Opens the given URL in the current tab
Util.loadUrl = function(url) {
    window.location = url;
}

// Tries to open URL in a new tab and keep focussed on current. Doesnt work in all browsers
Util.loadUrlInNewTab = function(url) {
    window.open(url, '_blank').blur();
    window.focus();
}

// Tries to find the given parameter in the url and returns its value
// Returns null when the parameter is absent.
Util.getPageParameter = function(paramName) {
    var url_string = window.location.href;
    var url = new URL(url_string);
    var p = url.searchParams.get(paramName);
    return p;
}

// Adds a parameter to the given URL if location.href has a value set for it
// useQuestionmark: true starts the query string ("?"), false appends ("&").
Util.addPageParameterIfNotNull = function(url, parameter, useQuestionmark) {
    let current = Util.getPageParameter(parameter);
    if (current !== null) {
        url += `${useQuestionmark ? "?" : "&"}${parameter}=${current}`;
    }
    return url;
}

// Sets a text field's cursor to the given position. -1 -> last position
Util.setCaretPosition = function(elemId, caretPos) {
    var elem = document.getElementById(elemId);
    // NOTE(review): elem.value is dereferenced here BEFORE the null check below —
    // this throws if no element with elemId exists; verify callers always pass a valid id.
    if (caretPos == -1) {
        caretPos = elem.value.length;
    }
    if(elem != null) {
        // Legacy IE path using text ranges
        if(elem.createTextRange !== undefined) {
            var range = elem.createTextRange();
            range.move('character', caretPos);
            range.select();
        } else {
            // Standard path
            if(elem.selectionStart !== undefined) {
                elem.setSelectionRange(caretPos, caretPos);
            }
            else elem.focus();
        }
    }
}

// Check if the current browsers doesn't want the user to be tracked
// Inspects the various vendor-prefixed Do-Not-Track flags; defaults to allowed.
Util.checkTrackingAllowed = function() {
    try {
        if (window.doNotTrack || navigator.doNotTrack || navigator.msDoNotTrack || 'msTrackingProtectionEnabled' in window.external) {
            if (window.doNotTrack == "1" || navigator.doNotTrack == "yes" || navigator.doNotTrack == "1" || navigator.msDoNotTrack == "1") {
                return false;
            } else {
                return true;
            }
        } else {
            return true;
        }
    } catch (e) {
        // Accessing DNT properties can throw in some browsers; default to allowed
        return true;
    }
}

// MDL doesn't show the scroll-arrows on start. This should help.
Util.mdlScrollFix = function(){ $(".mdl-layout__tab-bar-right-button").addClass("is-active"); } // Deletes all cookies whose names are within the given array Util.deleteSelectedCookies = function(cookieArray) { var allCookies = document.cookie.split(';'); for (var i = 0; i < allCookies.length; i++) { if (cookieArray.includes(allCookies[i])) { document.cookie = allCookies[i] + "=;expires="+ new Date(0).toUTCString()+";path=/;"; } else { document.cookie = allCookies[i]; } } } // Deletes all stored cookies Util.deleteAllCookies = function() { var allCookies = document.cookie.split(';'); for (var i = 0; i < allCookies.length; i++) { document.cookie = allCookies[i] + "=;expires="+ new Date(0).toUTCString()+";path=/;"; } } // Parses the given value into a boolean Util.toBoolean = function(value, defaultValue) { switch (value) { case 0: case "0": case "false": case false: return false; case 1: case "1": case "true": case true: return true; default: if (defaultValue) return defaultValue; return false; } } ================================================ FILE: html/assets/settings/manifest.json ================================================ { "name": "Jotoba", "short_name": "Jotoba", "start_url": "/", "display": "standalone", "background_color": "#fff", "description": "Jotoba is a powerful and free Japanese dictionary.", "icons": [ { "src": "/assets/jotokun/JotoHead.png", "sizes": "512x512", "type": "image/png" }, { "src": "/assets/jotokun/favicon.png", "sizes": "32x32", "type": "image/png" }] } ================================================ FILE: html/assets/settings/opensearch.xml ================================================ Jotoba Japanese dictionary search https://jotoba.de/assets/jotokun/favicon.png ================================================ FILE: jotoba_bin/Cargo.toml ================================================ [package] name = "jotoba" version = "0.1.0" authors = ["jojii "] edition = "2021" license = "GPLv3" # See more keys and their definitions at 
https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] news = { path = "../lib/news" } types = { path = "../lib/types" } sentence_reader = { path = "../lib/sentence_reader" } search = { path = "../lib/search" } api = { path = "../lib/api" } frontend = { path = "../lib/frontend" } config = { path = "../lib/config" } error = { path ="../lib/error"} localization = { path = "../lib/localization" } resources = { path = "../lib/resources" } indexes = { path = "../lib/indexes", features = ["parallel"] } actix-files = "0.6.2" actix-web = "4.3.1" argparse = "0.2.2" env_logger = "0.10.0" log = "0.4.19" sentry = { version = "0.31.5", optional = true } rayon = "1.7.0" snmalloc-rs = "0.3.4" #ngindex = { path = "../../ngindex" } ngindex = { git = "https://github.com/JojiiOfficial/ngindex"} actix-web-httpauth = "*" [features] default = ["img_scan"] sentry_error = ["sentry", "frontend/sentry_error"] img_scan = ["api/img_scan"] [dev-dependencies] criterion = "0.5.1" japanese = { path = "../lib/japanese" } [[bench]] name = "my_benchmark" harness = false [[bench]] name = "resources" harness = false ================================================ FILE: jotoba_bin/benches/my_benchmark.rs ================================================ use criterion::{criterion_group, criterion_main, Criterion}; use search::{ executor::SearchExecutor, query::{parser::QueryParser, Query, UserSettings}, word, }; use types::jotoba::{language::Language, search::SearchTarget}; #[global_allocator] static ALLOC: snmalloc_rs::SnMalloc = snmalloc_rs::SnMalloc; fn get_query(inp: &str, query_type: SearchTarget) -> Query { let mut settings = UserSettings::default(); settings.user_lang = Language::German; settings.show_english = true; QueryParser::new(inp.to_string(), query_type, settings) .parse() .unwrap() } fn load() { rayon::scope(move |s| { s.spawn(move |_| { resources::load("../resources/storage_data").unwrap(); }); s.spawn(move |_| { indexes::storage::load("../resources/indexes").unwrap(); 
}); s.spawn(|_| { // load ja nl parser since its lazy sentence_reader::load_parser("../resources/unidic-mecab") }); }); } fn criterion_benchmark(c: &mut Criterion) { load(); c.bench_function("search word: kanji", |b| { let query = get_query("kanji", SearchTarget::Words); b.iter(|| search(&query)) }); c.bench_function("search word: jp", |b| { let query = get_query("おはよう", SearchTarget::Words); b.iter(|| search(&query)) }); c.bench_function("search kanji reading", |b| { let query = get_query("事 ジ", SearchTarget::Words); b.iter(|| search(&query)) }); } #[inline] fn search(query: &Query) { let _res = SearchExecutor::new(word::Search::new(&query)).run(); } criterion_group!(benches, criterion_benchmark); criterion_main!(benches); ================================================ FILE: jotoba_bin/benches/resources.rs ================================================ use criterion::{black_box, criterion_group, criterion_main, Criterion}; #[global_allocator] static ALLOC: snmalloc_rs::SnMalloc = snmalloc_rs::SnMalloc; fn load() { resources::load("../resources/storage_data").unwrap(); } fn criterion_benchmark(c: &mut Criterion) { load(); c.bench_function("Get Kanji", |b| { b.iter(|| { //let = resources::get().words(); resources::get().kanji().by_literal(black_box('跡')); }) }); let tests: Vec<&'static [char]> = vec![&['囗'], &['一'], &['囗', '一'], &['口'], &['口', '一']]; c.bench_function("Find by radicals", |b| { b.iter(|| { for i in &tests { api::app::radical::kanji::find_kanji(black_box(i)); } }) }); c.bench_function("Find by radicals light", |b| { b.iter(|| { api::app::radical::kanji::find_kanji(black_box(&['首'])); }) }); } criterion_group!(benches, criterion_benchmark); criterion_main!(benches); ================================================ FILE: jotoba_bin/src/check.rs ================================================ use crate::webserver::prepare_data; use config::Config; use ngindex::index_framework::traits::{backend::Backend, storage::IndexStorage}; use 
types::jotoba::language::Language; /// Checks resources and returns `true` if required features are available pub fn resources() -> bool { let res = resources::get(); if res.check() { return true; } log::error!( "Missing required features: {:?}", res.missing_but_required() ); false } /// Checks integrity of all resources. Jotoba (should) work perfectly /// if this function does not fail (ignoring all the bugs and ugly code) pub fn check() { let res = check_all(); if res { println!("Success"); } else { println!("Failed"); } } fn check_all() -> bool { println!("Loading data"); let config = Config::new(None).expect("Config invalid"); prepare_data(&config); println!("Testing resources"); let res = resources(); println!("Testing indexes"); let ind = indexes(); res && ind } fn indexes() -> bool { words() && names() && sentences() && regex() } fn sentences() -> bool { let sentence_retrieve = resources::get().sentences(); let fg_index = indexes::get().sentence().foreign(); for language in Language::iter_word() { for id in fg_index.storage().iter().map(|i| *i.document()) { if sentence_retrieve.by_id(id).is_none() { println!("Sentence index ({language:?}) don't not match"); return false; } } } let jp_index = indexes::get().sentence().native(); for id in jp_index.storage().iter().map(|i| *i.document()) { if sentence_retrieve.by_id(id).is_none() { println!("Sentence index (Japanese) don't not match"); return false; } } true } fn names() -> bool { let name_retrieve = resources::get().names(); let transcr_index = indexes::get().name().foreign(); for i in transcr_index.storage().iter().map(|i| *i.item()) { if name_retrieve.by_sequence(i).is_none() { println!("Foreign name index does not match resources"); return false; } } let jp_index = indexes::get().name().native(); for i in jp_index.storage().iter().map(|i| *i.item()) { if name_retrieve.by_sequence(i).is_none() { println!("Japanese name index does not match resources"); return false; } } true } fn words() -> bool { let 
word_retrieve = resources::get().words(); for language in Language::iter_word() { let w_index = indexes::get() .word() .foreign(language) .expect(&format!("Missing index {:?}", language)); for doc_vec in w_index.storage().iter() { let seq_id = *doc_vec.document(); if word_retrieve.by_sequence(seq_id).is_none() { println!("Word and Index don't match"); return false; } } } let jp_index = indexes::get().word().native(); for vec in jp_index.storage().iter() { if word_retrieve.by_sequence(*vec.item()).is_none() { println!("Word and (Japanese) Index don't match"); return false; } } true } fn regex() -> bool { let w_retrieve = resources::get().words(); let regex_index = indexes::get().word().regex(); for (_, words) in regex_index.iter() { if words.iter().any(|i| w_retrieve.by_sequence(*i).is_none()) { println!("Regex index invalid"); return false; } } true } ================================================ FILE: jotoba_bin/src/cli.rs ================================================ use std::process::exit; use argparse::{ArgumentParser, Print, StoreTrue}; /// Command line arguments #[derive(Default)] pub struct Options { /// Start the server pub start: bool, pub debug: bool, pub check_resources: bool, } // Parse CLI args pub fn parse() -> Options { let mut options = Options::default(); { let mut ap = ArgumentParser::new(); ap.set_description("A multilang japanese dictionary"); ap.add_option( &["-V", "--version"], Print(env!("CARGO_PKG_VERSION").to_string()), "Show version", ); ap.refer(&mut options.start) .add_option(&["--start", "-s"], StoreTrue, "Start the server"); ap.refer(&mut options.debug) .add_option(&["--debug", "-d"], StoreTrue, "Run in debug mode"); ap.refer(&mut options.check_resources).add_option( &["--check", "-c"], StoreTrue, "Check resources", ); ap.parse_args_or_exit(); } if options.check_resources && options.start { println!("Can't use start and check_resources at once"); exit(1); } options } ================================================ FILE: 
jotoba_bin/src/main.rs ================================================ #![allow(irrefutable_let_patterns)] // Benchmarks say this is up to 50% faster #[global_allocator] static ALLOC: snmalloc_rs::SnMalloc = snmalloc_rs::SnMalloc; mod check; mod cli; mod webserver; #[actix_web::main] pub async fn main() { let options = cli::parse(); // Check resources on --check/-c if options.check_resources { check::check(); return; } // Start the webserver on --stat/-s if options.start { webserver::start(options).await.expect("webserver failed"); return; } // User didn't read the docs println!("Nothing to do. Use `-s` to start the dictionary"); } ================================================ FILE: jotoba_bin/src/webserver.rs ================================================ use actix_files::NamedFile; use actix_web_httpauth::{extractors::bearer::BearerAuth, middleware::HttpAuthentication}; use error::api_error::RestError; use indexes::storage::suggestions; use localization::TranslationDict; use actix_web::{ dev::ServiceRequest, http::{ header::{ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_ORIGIN, CACHE_CONTROL}, StatusCode, }, middleware::{self, Compat, Compress}, web::{self as actixweb, Data}, App, Error, HttpRequest, HttpResponse, HttpResponseBuilder, HttpServer, }; use config::Config; use log::{debug, warn}; use std::{path::Path, sync::Arc, thread, time::Instant}; use crate::{check, cli::Options}; /// How long frontend assets are going to be cached by the clients. 
Currently 1 week const ASSET_CACHE_MAX_AGE: u64 = 604800; /// Start the webserver pub(super) async fn start(options: Options) -> std::io::Result<()> { if options.debug { println!("DEBUG MODE ENABLED"); rayon::ThreadPoolBuilder::new() .num_threads(1) .build_global() .unwrap(); } setup_logger(); let start = Instant::now(); let config = Config::new(None).expect("config failed"); if options.debug { println!("{config:#?}"); } prepare_data(&config); let locale_dict_arc = load_translations(&config); #[cfg(feature = "sentry_error")] setup_sentry(&config); let address = config.server.listen_address.clone(); if !check() { return Ok(()); } debug!("Resource loading took {:?}", start.elapsed()); debug_info(); HttpServer::new(move || { let app = App::new() // Data .app_data(Data::new(config.clone())) .app_data(Data::new(locale_dict_arc.clone())) // Middlewares .wrap(middleware::Logger::default()) .service( actixweb::resource("/") .wrap(Compat::new(middleware::Compress::default())) .route(actixweb::get().to(frontend::index::index)), ) .service(actixweb::resource("/robots.txt").route(actixweb::get().to(robotstxt))) .service( actixweb::resource("/ready").route(actixweb::get().to(frontend::liveness::ready)), ) .service( actixweb::resource("/healthy") .route(actixweb::get().to(frontend::liveness::healthy)), ) .service( actixweb::resource("/docs.html") .wrap(Compat::new(middleware::Compress::default())) .route(actixweb::get().to(docs)), ) .service( actixweb::resource("/sitemap.xml") .wrap(Compat::new(middleware::Compress::default())) .route(actixweb::get().to(sitemap)), ) .service( actixweb::resource("/privacy") .wrap(Compat::new(middleware::Compress::default())) .route(actixweb::get().to(privacy)), ) .service( actixweb::resource("/service-worker.js") .wrap(Compat::new(middleware::Compress::default())) .route(actixweb::get().to(service_worker)), ) .service( actixweb::resource("/search/{query}") .wrap(Compat::new(middleware::Compress::default())) 
.route(actixweb::get().to(frontend::search_ep::search_ep)), ) .service( actixweb::resource("/search") .wrap(Compat::new(middleware::Compress::default())) .route(actixweb::get().to(frontend::search_ep::search_ep_no_js)), ) .service( actixweb::resource("/direct/{type}/{id}") .wrap(Compat::new(middleware::Compress::default())) .route(actixweb::get().to(frontend::direct::direct_ep)), ) .service( actixweb::resource("/about") .wrap(Compat::new(middleware::Compress::default())) .route(actixweb::get().to(frontend::about::about)), ) .service( actixweb::resource("/news") .wrap(Compat::new(middleware::Compress::default())) .route(actixweb::get().to(frontend::news_ep::news)), ) .service( actixweb::resource("/help") .wrap(Compat::new(middleware::Compress::default())) .route(actixweb::get().to(frontend::help_page::help)), ) .default_service(actix_web::Route::new().to(frontend::web_error::not_found)) // API .service( actixweb::scope("/api") .wrap( middleware::DefaultHeaders::new() .add((ACCESS_CONTROL_ALLOW_ORIGIN, "*")) .add((ACCESS_CONTROL_ALLOW_HEADERS, "Content-Type")), ) .wrap(Compat::new(Compress::default())) .route("/", actixweb::get().to(docs)) .default_service(actix_web::Route::new().to(docs)) .service( actixweb::scope("app") .route( "k_comps", actixweb::post().to(api::app::search::kanji::reading_compounds), ) .route( "kanji", actixweb::post().to(api::app::search::kanji::search), ) .route( "names", actixweb::post().to(api::app::search::names::search), ) .route( "sentences", actixweb::post().to(api::app::search::sentences::search), ) .route( "words", actixweb::post().to(api::app::search::words::search), ) .service( actixweb::scope("details") .route( "word", actixweb::post().to(api::app::details::word::details), ) .route( "sentence", actixweb::post() .to(api::app::details::sentences::details_ep), ), ), ) .service( actixweb::scope("search") .route("words", actixweb::post().to(api::search::word::word_search)) .route( "kanji", 
actixweb::post().to(api::search::kanji::kanji_search), ) .route("names", actixweb::post().to(api::search::name::name_search)) .route( "sentences", actixweb::post().to(api::search::sentence::sentence_search), ), ) .service( actixweb::scope("internal") .wrap(HttpAuthentication::bearer(internal_validator)) .service(actixweb::scope("info").route( "words", actixweb::post().to(api::internal::info::words::word_info), )), ) .service( actixweb::scope("kanji") .route( "by_radical", actixweb::post().to(api::app::radical::kanji_by_radicals), ) .route( "decompgraph", actixweb::post().to(api::app::kanji::ids_tree::decomp_graph), ), ) .route( "/radical/search", actixweb::post().to(api::app::radical::search::search_radical), ) .route( "/suggestion", actixweb::post().to(api::app::completions::suggestion_ep), ) .route( "/os-suggestions", actixweb::get().to(api::app::completions::opensearch::suggestion_ep), ) .route("/img_scan", actixweb::post().to(api::app::img::scan_ep)) .route( "/news/short", actixweb::post().to(api::app::news::short::news), ) .route( "/news/detailed", actixweb::post().to(api::app::news::detailed::news), ), ) // Static files .service( actixweb::scope("/audio") .wrap( middleware::DefaultHeaders::new() .add((CACHE_CONTROL, format!("max-age={}", ASSET_CACHE_MAX_AGE))), ) .service( actix_files::Files::new("", config.server.get_audio_files()) .show_files_listing(), ), ) .service( actixweb::scope("/assets") .wrap( middleware::DefaultHeaders::new() .add((CACHE_CONTROL, format!("max-age={}", ASSET_CACHE_MAX_AGE))) .add((ACCESS_CONTROL_ALLOW_ORIGIN, "*")) .add((ACCESS_CONTROL_ALLOW_HEADERS, "Content-Type")), ) .wrap(Compat::new(Compress::default())) .service( actix_files::Files::new("", config.server.get_html_files()) .show_files_listing(), ), ) .service( actixweb::scope("/variable_assets/{oma}/assets") .wrap( middleware::DefaultHeaders::new() .add((CACHE_CONTROL, format!("max-age={}", ASSET_CACHE_MAX_AGE))), ) .wrap(Compat::new(Compress::default())) .service( 
actix_files::Files::new("", config.server.get_html_files()) .show_files_listing(), ), ); //#[cfg(feature = "sentry_error")] //let app = app.wrap(sentry_actix::Sentry::new()); app }) .bind(&address)? .run() .await } async fn service_worker(config: Data, _req: HttpRequest) -> actix_web::Result { serve_html_file(config, "js/tools/service-worker.js").await } async fn privacy(config: Data, _req: HttpRequest) -> actix_web::Result { serve_html_file(config, "privacypolicy.html").await } async fn sitemap(config: Data, _req: HttpRequest) -> actix_web::Result { serve_html_file(config, "sitemap.xml").await } async fn serve_html_file(config: Data, file: &str) -> actix_web::Result { let htmlpath = Path::new(config.server.get_html_files()); let path = htmlpath.join(file); Ok(NamedFile::open(path)?) } async fn robotstxt(_req: HttpRequest) -> HttpResponse { HttpResponseBuilder::new(StatusCode::OK).body( r#"User-Agent: * Allow: / Sitemap: https://jotoba.com/sitemap.xml"#, ) } async fn docs(config: Data, _req: HttpRequest) -> actix_web::Result { let htmlpath = Path::new(config.server.get_html_files()); let filepath = Path::new("docs.html"); let path = htmlpath.join(filepath); Ok(NamedFile::open(path)?) 
} pub(crate) fn prepare_data(ccf: &Config) { let cf = ccf.clone(); thread::spawn(move || { suggestions::load(cf.get_suggestion_sources()).expect("Failed to load suggestions"); log::debug!("Suggestions loaded"); }); rayon::scope(move |s| { let cf = ccf.clone(); s.spawn(move |_| { log::debug!("Loading Resources"); load_resources(&cf.get_storage_data_path()); }); let cf = ccf.clone(); s.spawn(move |_| { log::debug!("Loading Indexes"); load_indexes(&cf); }); let cf = ccf.clone(); s.spawn(move |_| { log::debug!("Loading tokenizer"); load_tokenizer(&cf); }); let cf = ccf.clone(); s.spawn(move |_| clean_img_scan_dir(&cf)); let cf = ccf.clone(); s.spawn(move |_| { log::debug!("Loading News"); if let Err(err) = news::News::init(cf.server.get_news_folder()) { warn!("Failed to load news: {}", err); } }); }); } fn setup_logger() { env_logger::init_from_env(env_logger::Env::new().default_filter_or("debug")); } pub fn load_tokenizer(config: &Config) { sentence_reader::load_parser(&config.get_unidic_dict()); } /// Clears uploaded images which haven't been cleared yet fn clean_img_scan_dir(config: &Config) { let path = config.get_img_scan_upload_path(); let path = Path::new(&path); if !path.exists() || !path.is_dir() { return; } std::fs::remove_dir_all(&path).expect("Failed to clear img scan director"); } fn debug_info() { log::debug!("All features: {:?}", resources::Feature::all()); log::debug!("Supported: {:?}", resources::get().get_features()); log::debug!("Not supported: {:?}", resources::get().missing_features()); } pub fn load_resources(src: &str) { let start = Instant::now(); resources::load(src).expect("Failed to load resource storage"); debug!("Resources took: {:?}", start.elapsed()); } fn load_translations(config: &Config) -> Arc { let locale_dict = TranslationDict::new( config.server.get_locale_path(), localization::language::Language::English, ) .expect("Failed to load localization files"); Arc::new(locale_dict) } pub fn load_indexes(config: &Config) { 
indexes::storage::load(config.get_indexes_source()).expect("Failed to load index files"); } fn check() -> bool { if !check::resources() { log::error!("Not all required data found! Exiting"); return false; } if !indexes::get().check() { log::error!("Not all indexes are available!"); return false; } /* if !indexes::get_suggestions().check() { log::error!("Not all suggestion indexes are available!"); //return false; } */ true } #[cfg(feature = "sentry_error")] fn setup_sentry(config: &Config) { if let Some(ref sentry_config) = config.sentry { use std::mem::ManuallyDrop; // We want to run sentry all the time so don't drop here let _guard = ManuallyDrop::new(sentry::init(( sentry_config.dsn.as_str(), sentry::ClientOptions { release: sentry::release_name!(), ..Default::default() }, ))); std::env::set_var("RUST_BACKTRACE", "1"); } } async fn internal_validator( req: ServiceRequest, credentials: BearerAuth, ) -> Result { let config = req.app_data::>().unwrap(); let key = &config.server.internal_api_key; if key.is_empty() || key != credentials.token() { let err: Error = RestError::Unauthorized.into(); return Err((err, req)); } Ok(req) } ================================================ FILE: lib/api/Cargo.toml ================================================ [package] name = "api" version = "0.1.0" authors = ["jojii "] edition = "2021" [dependencies] japanese = { path = "../japanese" } engine = { path = "../engine" } news = { path = "../news" } error = { path = "../error", features = ["web_error"] } sentence_reader = { path = "../sentence_reader" } search = { path = "../search" } utils = { path = "../utils" } config = { path = "../config" } resources = { path = "../resources" } indexes = { path = "../indexes" } types = { path = "../types", default-features = false } actix-web = "4.3.1" actix-multipart = "0.6.0" itertools = "0.11.0" once_cell = { version = "1.18.0", default-features = false } serde = "1.0.171" log = "0.4.19" wana_kana = { git = 
"https://github.com/WeDontPanic/wana_kana_rust" } #jpeudex = { path = "../../../jpeudex"} jpeudex = { git = "https://github.com/JojiiOfficial/jpeudex" } bincode = "1.3.3" regex = { version = "1.7.1", features = ["std"], default-features = false } #autocompletion = { path = "../../../AutoCompletionFramework" } autocompletion = { git = "https://github.com/WeDontPanic/AutoCompletionFramework" } intmap = { git = "https://github.com/JojiiOfficial/rust-intmap" } #priority_container = "0.1.1" priority_container = { git = "https://github.com/JojiiOfficial/PrioContainer/" } futures = { version = "0.3.28", optional = true } leptess = { version = "0.14.0", optional = true } order_struct = { git = "https://github.com/JojiiOfficial/OrderStruct" } #ids_parser = { path = "../../../ids_parser" } ids_parser = { git = "https://github.com/JojiiOfficial/IDS-Parser" } #index_framework = { path = "../../../index_framework" } index_framework = { git = "https://github.com/WeDontPanic/index_framework" } serde_json = "1.0.100" jp_utils = { git = "https://github.com/JojiiOfficial/jp_utils"} [features] default = [] img_scan = ["leptess", "futures"] ================================================ FILE: lib/api/src/app/completions/kanji/meaning.rs ================================================ use super::super::{convert_results, words::foreign::try_romaji, Response}; use autocompletion::suggest::{ extension::{ngram::NGramExtension, similar_terms::SimilarTermsExtension}, query::SuggestionQuery, task::SuggestionTask, }; use search::query::Query; /// Returns kanji meaning suggestions pub fn suggestions(query: &Query) -> Option { let index = indexes::get_suggestions().kanji_meanings(); let mut suggestion_task = SuggestionTask::new(30); let mut def_query = SuggestionQuery::new(index, &query.query_str); let mut ng_ext = NGramExtension::new(index); ng_ext.options.weights.freq_weight = 0.5; ng_ext.options.weights.total_weight = 0.7; def_query.add_extension(ng_ext); 
suggestion_task.add_query(def_query); if let Some(hira_query) = try_romaji(&query.query_str) { let jp_index = indexes::get_suggestions().jp_words(); let mut rom_sug_query = SuggestionQuery::new(jp_index, hira_query); rom_sug_query.weights.total_weight = 0.5; let mut similar_terms = SimilarTermsExtension::new(jp_index, 4); similar_terms.options.weights.total_weight = 0.2; rom_sug_query.add_extension(similar_terms); suggestion_task.add_query(rom_sug_query); } let suggestions = convert_results(suggestion_task.search()); Some(Response::new(suggestions)) } ================================================ FILE: lib/api/src/app/completions/kanji/mod.rs ================================================ pub mod meaning; pub mod reading; use search::query::{Query, QueryLang}; use types::api::app::completions::Response; use wana_kana::to_romaji::to_romaji; /// Returns kanji suggestions pub(crate) fn suggestions(query: Query) -> Option { match query.q_lang { QueryLang::Foreign => meaning::suggestions(&query), QueryLang::Japanese => japanese_suggestions(&query), /* QueryLang::Korean => todo!(), QueryLang::Undetected => todo!(), */ _ => None, } } fn japanese_suggestions(query: &Query) -> Option { let romaji = to_romaji(query.query_str.as_str()); let mut suggestions = super::words::native::suggestions(&query, &romaji, &[])?; // romev entries without kanji suggestions.retain(|i| i.secondary.is_some()); Some(Response { suggestions, ..Default::default() }) } ================================================ FILE: lib/api/src/app/completions/kanji/reading.rs ================================================ use engine::Engine; use index_framework::traits::{ backend::Backend, dictionary::IndexDictionary, postings::IndexPostings, }; use japanese::ToKanaExt; use order_struct::order_nh::OrderVal; use priority_container::PrioContainerMax; use search::engine::words::native::k_reading; use types::{ api::app::completions::{Response, SuggestionType, WordPair}, jotoba::kanji, }; /// Gets 
suggestions for kanji reading search eg: "痛 いた.い" pub fn suggestions(kanji_reading: kanji::reading::ReadingSearch) -> Option { let kanji_storage = resources::get().kanji(); let query_reading = kanji_reading .reading .replace("。", "") .replace(".", "") .to_hiragana(); let kanji = kanji_storage.by_literal(kanji_reading.literal)?; let mut queue = PrioContainerMax::new(30); let iter = kanji .kunyomi .iter() .chain(kanji.onyomi.iter()) .map(|i| WordPair::with_secondary(i.clone(), kanji.literal.to_string())) .map(|wp| { let score = score(kanji.literal, &wp.primary, &query_reading); OrderVal::new(wp, score) }); queue.extend(iter); if queue.is_empty() { return None; } let mut vec: Vec<_> = queue.into_iter().map(|i| i.0.into_inner()).collect(); vec.reverse(); Some(Response::with_type(vec, SuggestionType::KanjiReading)) } fn score(literal: char, reading: &str, query: &str) -> usize { let mut score = 0; // Show written prefixes on top if query.len() > 0 && starts_with(reading, query) { score += 1000000; } // Show readings with more results first let index = k_reading::Engine::get_index(None); let score_qurey = format!("{}{}", literal, reading); if let Some(term_id) = index.dict().get_id(&score_qurey) { let posting = index.postings(0).unwrap().get_posting(term_id); score += (posting.len() as f32).log(1.01).floor() as usize; } score } #[inline] fn starts_with(word: &str, reading: &str) -> bool { word.replace(".", "").to_hiragana().starts_with(reading) } ================================================ FILE: lib/api/src/app/completions/mod.rs ================================================ mod kanji; mod names; pub mod opensearch; mod request; mod words; use actix_web::web::Json; use jp_utils::JapaneseExt; use search::query::{Form, Query}; use types::{ api::app::completions::{Request, Response, SuggestionType, WordPair}, jotoba::{kanji::reading::ReadingSearch, search::SearchTarget}, }; use words::hashtag; pub async fn suggestion_ep(payload: Json) -> Result, actix_web::Error> { 
Ok(Json(suggestion_ep_inner(payload.into_inner())?)) } /// Get search suggestions endpoint pub(crate) fn suggestion_ep_inner(payload: Request) -> Result { request::validate(&payload)?; if payload.hashtag { let suggestions = hashtag::suggestions(&payload.input, payload.search_target); if let Some(res) = suggestions { return Ok(Response::with_type(res, SuggestionType::Hashtag)); } return Ok(Response::default()); } // Adjust payload and parse to query let (query, radicals) = request::get_query(request::adjust(payload))?; // Eg. when tags get parsed, the query becomes empty if query.query_str.trim().is_empty() { return Ok(Response::default()); } Ok(get_suggestions(query, radicals)) } /// Returns best matching suggestions for the given query fn get_suggestions(query: Query, radicals: Vec) -> Response { let res = match query.target { SearchTarget::Kanji => kanji::suggestions(query), SearchTarget::Names => names::suggestions(query), SearchTarget::Words | SearchTarget::Sentences => { if let Some(kanji_reading) = as_kanji_reading(&query) { kanji::reading::suggestions(kanji_reading) } else { words::suggestions(query, &radicals) } } }; res.unwrap_or_default() } /// Returns Some(KanjiReading) if query is or 'could be' a kanji reading query. /// "Could be" means that a kanji-reading search is being types. 
This the case /// if a single kanji and a space is written in the current query fn as_kanji_reading(query: &Query) -> Option { match &query.form { Form::KanjiReading(r) => Some(r.clone()), _ => { let mut query_str = query.raw_query.chars(); let first = query_str.next()?; let second = query_str.next()?; if first.is_kanji() && second == ' ' { Some(ReadingSearch { reading: String::new(), literal: first, }) } else { None } } } } /// Converts engine output to a set of `WordPair` #[inline] pub(crate) fn convert_results(engine_output: Vec) -> Vec { engine_output .into_iter() .map(|i| WordPair { primary: i.primary, secondary: i.secondary, }) .collect() } ================================================ FILE: lib/api/src/app/completions/names/mod.rs ================================================ use super::{convert_results, Response}; use autocompletion::suggest::{ extension::ngram::NGramExtension, query::SuggestionQuery, task::SuggestionTask, }; use japanese::ToKanaExt; use search::query::{Query, QueryLang}; use wana_kana::to_katakana::to_katakana; /// Returns name suggestions pub(crate) fn suggestions(query: Query) -> Option { match query.q_lang { QueryLang::Japanese => native_suggestions(&query), QueryLang::Foreign => transcription_suggestions(&query), _ => None, } } /// Returns trascripted name suggestions pub fn transcription_suggestions(query: &Query) -> Option { let query_str = &query.query_str; let index = indexes::get_suggestions().names_foreign(); let mut task = SuggestionTask::new(30); let mut def_query = SuggestionQuery::new(index, query_str); let ng_ext = NGramExtension::new(index); def_query.add_extension(ng_ext); task.add_query(def_query); if let Some(romaji_query) = super::words::foreign::try_romaji(query_str) { let jp_index = indexes::get_suggestions().names_native(); task.add_query(SuggestionQuery::new(jp_index, romaji_query.clone())); let katakana = to_katakana(romaji_query.as_str()); if katakana != romaji_query { 
task.add_query(SuggestionQuery::new(index, katakana)); } } let suggestions = convert_results(task.search()); Some(Response::new(suggestions)) } /// Returns native name suggestions pub fn native_suggestions(query: &Query) -> Option { let query_str = &query.query_str; let index = indexes::get_suggestions().names_native(); let mut task = SuggestionTask::new(30); let mut def_query = SuggestionQuery::new(index, query_str); let ng_ext = NGramExtension::new(index); def_query.add_extension(ng_ext); task.add_query(def_query); let katakana = to_katakana(query_str.as_str()); if &katakana != query_str { task.add_query(SuggestionQuery::new(index, katakana)); } let hiragana = query_str.to_hiragana(); if &hiragana != query_str { task.add_query(SuggestionQuery::new(index, hiragana)); } let suggestions = convert_results(task.search()); Some(Response { suggestions, ..Default::default() }) } ================================================ FILE: lib/api/src/app/completions/opensearch/mod.rs ================================================ mod parse; use actix_web::web; use serde::Deserialize; use types::{api::app::completions::Request, jotoba::search::SearchTarget}; #[derive(Deserialize)] pub struct EPQuery { q: String, } pub async fn suggestion_ep(query: web::Query) -> Result { let raw_query = query.into_inner().q; let parsed = parse::parse(raw_query.clone()); let s_target = parsed.search_target().unwrap_or(SearchTarget::Words); let query = make_request(parsed.query.clone(), s_target); let suggestions = get_suggestions(query)?; Ok(gen_output(suggestions, raw_query)) } fn get_suggestions(query: Request) -> Result, actix_web::Error> { let s_target = query.search_target; let res = super::suggestion_ep_inner(query)? 
.suggestions .iter() .map(|i| { let mut s = i.secondary_preferred().to_string(); if s_target != SearchTarget::Words { s.push_str(&format!(" #{s_target:?}")); } s }) .collect::>(); Ok(res) } fn gen_output(suggestions: Vec, raw_query: String) -> String { let mut data = serde_json::to_string(&[suggestions, vec![], vec![]]).unwrap_or_else(|_| "".to_string()); if data.len() > 2 { data = data[1..(data.len() - 1)].to_string(); } format!("[\"{raw_query}\",{data}]") } fn make_request(inp: String, search_target: SearchTarget) -> Request { Request { input: inp, lang: "en".to_string(), search_target, radicals: vec![], hashtag: false, } } ================================================ FILE: lib/api/src/app/completions/opensearch/parse.rs ================================================ use search::query::{parser::QueryParser, Tag, UserSettings}; use types::jotoba::search::SearchTarget; pub(crate) fn parse(inp: String) -> Parsed { let query = QueryParser::new(inp.clone(), SearchTarget::Words, UserSettings::default()).parse(); if query.is_none() { return Parsed::new(inp.to_string()); } let query = query.unwrap(); let tags = query.tags; Parsed::with_tags(query.query_str, tags) } pub(crate) struct Parsed { pub query: String, pub tags: Vec, } impl Parsed { #[inline] fn new(query: String) -> Self { Self { query, tags: vec![], } } #[inline] fn with_tags(query: String, tags: Vec) -> Self { Self { query, tags } } #[inline] pub fn search_target(&self) -> Option { self.tags .iter() .find(|i| i.is_search_type()) .map(|i| i.as_search_type().unwrap()) .copied() } } ================================================ FILE: lib/api/src/app/completions/request.rs ================================================ use std::str::FromStr; use error::api_error::RestError; use jp_utils::JapaneseExt; use search::query::{self, parser::QueryParser, Query, QueryLang, UserSettings}; use types::{api::app::completions::Request, jotoba::language::Language}; use utils::real_string_len; /// Adjust the query and 
returns a newly allocated one pub(crate) fn adjust(request: Request) -> Request { let mut query_str = request.input.to_string(); let query_len = real_string_len(&request.input); // Some inputs place the roman letter of the japanese text while typing with romanized input. // If input is japanese but last character is a romanized letter, strip it off let lang = query::parser::lang::parse(&query_str); if lang == QueryLang::Japanese && query_str.ends_with("n") { query_str = query_str.replace("n", "ん"); } let last_chars = query_str.chars().rev().take(2).collect::>(); if lang == QueryLang::Japanese && !last_chars.iter().any(|i| !i.is_japanese()) && query_len > 1 && !last_chars.is_empty() { let len: usize = last_chars .into_iter() .filter(|i| i.is_roman_letter()) .map(|i| i.len_utf8()) .sum(); query_str = query_str[..query_str.len() - len].to_string(); } Request { input: query_str.to_owned(), ..request } } /// Returns a `Query` based on the `Request` pub(crate) fn get_query(request: Request) -> Result<(Query, Vec), RestError> { let query_str = request.input.trim_start().to_string(); let search_type = request.search_target; let settings = UserSettings { user_lang: get_language(&request), ..UserSettings::default() }; // Build and parse the query let query = QueryParser::new(query_str, search_type, settings) .parse() .ok_or(RestError::BadRequest)?; Ok((query, request.radicals)) } /// Returns the user configured language of the [`Request`] #[inline] pub(crate) fn get_language(request: &Request) -> Language { Language::from_str(&request.lang).unwrap_or_default() } /// Validates the API request payload pub(crate) fn validate(payload: &Request) -> Result<(), RestError> { let query_len = real_string_len(&payload.input.trim()); if (query_len < 1 && !payload.hashtag) || query_len > 37 { return Err(RestError::BadRequest.into()); } Ok(()) } ================================================ FILE: lib/api/src/app/completions/words/foreign.rs 
================================================ use autocompletion::suggest::{ extension::{ kanji_align::KanjiAlignExtension, ngram::NGramExtension, similar_terms::SimilarTermsExtension, }, query::SuggestionQuery, task::SuggestionTask, }; use japanese::{ guessing::{could_be_romaji, is_romaji_repl}, to_hira_fmt, }; use types::jotoba::language::Language; use utils::real_string_len; use super::super::*; /// Returns suggestions based on non japanese input pub fn suggestions(query: &Query, query_str: &str) -> Option> { let query_lower = autocompletion::index::basic::basic_format(query_str.trim()); let mut task = SuggestionTask::new(30); let lang = query.settings.language(); // Default search query task.add_query(new_suggestion_query(&query_lower, lang)?); // Add results for english if query.settings.show_english() { let mut en_sugg_query = new_suggestion_query(&query_lower, Language::English)?; en_sugg_query.weights.total_weight = 0.75; en_sugg_query.weights.freq_weight = 0.15; task.add_query(en_sugg_query); } // Romaji result //if let Some(hira_query) = try_romaji(query_str.trim()) { let hira_query = try_romaji(query_str.trim()).unwrap_or_else(|| japanese::to_hira_fmt(query_str)); //let hira_query = query_str.to_hiragana(); println!("hira query: {hira_query}"); let jp_engine = indexes::get_suggestions().jp_words(); let mut rom_query = SuggestionQuery::new(jp_engine, hira_query.clone()); if could_be_romaji(query_str) { rom_query.weights.total_weight = 0.99; } else { rom_query.weights.total_weight = 0.5; } /* query.weights.freq_weight = 0.1; query.weights.str_weight = 1.9; */ let mut k_r_align = KanjiAlignExtension::new(jp_engine); k_r_align.options.weights.freq_weight = 1.0; k_r_align.options.threshold = 5; rom_query.add_extension(k_r_align); let mut similar_terms = SimilarTermsExtension::new(jp_engine, 14); similar_terms.options.threshold = 10; similar_terms.options.weights.total_weight = 0.75; similar_terms.options.weights.freq_weight = 0.2; 
similar_terms.options.weights.str_weight = 1.8; similar_terms.options.min_query_len = 4; rom_query.add_extension(similar_terms); let mut ng_ext = NGramExtension::with_sim_threshold(jp_engine, 0.4); ng_ext.options.threshold = 5; ng_ext.options.weights.total_weight = 0.25; ng_ext.options.weights.freq_weight = 0.02; ng_ext.query_weigth = 0.15; ng_ext.options.limit = 100; ng_ext.query_weigth = 0.05; ng_ext.options.min_query_len = 5; ng_ext.cust_query = Some(hira_query.clone()); rom_query.add_extension(ng_ext); task.set_rel_mod(|i, rel| { let out = i.to_output(); let kana = &out.primary; if japanese::romaji_prefix(query_str.trim(), &kana) { return rel + 1000; } rel }); task.add_query(rom_query); //} Some(convert_results(task.search())) } fn new_suggestion_query(query: &str, lang: Language) -> Option { let engine = indexes::get_suggestions().foreign_words(lang)?; let mut suggestion_query = SuggestionQuery::new(engine, &query); suggestion_query.weights.str_weight = 1.5; suggestion_query.weights.freq_weight = 0.5; let mut ng_ex = NGramExtension::with_sim_threshold(engine, 0.5); ng_ex.options.weights.total_weight = 0.7; ng_ex.options.weights.freq_weight = 0.05; ng_ex.query_weigth = 0.05; ng_ex.options.min_query_len = 5; ng_ex.options.limit = 100; ng_ex.options.threshold = 5; suggestion_query.add_extension(ng_ex); Some(suggestion_query) } /// Returns Some(String) if `query_str` could be (part of) romaji search input and None if not pub(crate) fn try_romaji(query_str: &str) -> Option { let mut query_str = query_str.replace("-", "ー"); if query_str.ends_with("m") { query_str.pop(); } let query_str = &query_str; let str_len = real_string_len(query_str); if str_len < 3 || query_str.contains(' ') { return None; } if let Some(v) = is_romaji_repl(query_str) { return Some(to_hira_fmt(&v)); } if str_len < 3 { return None; } // 'n' is the only hiragana with with=1 in romaji so allow them // to be treated properly too let min_len = 3usize.saturating_sub(query_str.chars().filter(|i| *i 
== 'n').count()); // Strip one to avoid switching between romaji/normal results if str_len > min_len { let prefix = strip_str_end(query_str, 1); if let Some(v) = is_romaji_repl(prefix) { return Some(to_hira_fmt(&v)); } } // shi ending needs more stripping but also more existing romaji to not // heavily overlap with other results if str_len >= min_len + 2 && end_three_char_kana(query_str) { let prefix = strip_str_end(query_str, 2); if let Some(v) = is_romaji_repl(prefix) { return Some(to_hira_fmt(&v)); } } None } /// Returns a substring of `inp` with `len` amount of tailing characters being removed. /// This works for non UTF-8 as well. If len > |inp| "" gets returned #[inline] pub fn strip_str_end(inp: &str, len: usize) -> &str { match inp.char_indices().rev().nth(len - 1).map(|i| i.0) { Some(end) => &inp[..end], None => "", } } /// Returns `true` if `s` ends with 2 of 3 3-char kana romaji #[inline] fn end_three_char_kana(s: &str) -> bool { [ "sh", "ch", "ts", "hy", "ky", "ny", "my", "gy", "ry", "by", "py", ] .iter() .any(|i| s.ends_with(i)) } #[cfg(test)] mod test { use super::*; #[test] fn test_strip_end() { let inp = "これはかっこいいテキスト"; assert_eq!(strip_str_end(inp, 1), "これはかっこいいテキス"); assert_eq!(strip_str_end(inp, 2), "これはかっこいいテキ"); assert_eq!(strip_str_end(inp, 3), "これはかっこいいテ"); } } ================================================ FILE: lib/api/src/app/completions/words/hashtag.rs ================================================ use index_framework::traits::{backend::Backend, storage::IndexStorage}; use std::ops::Deref; use types::{api::app::completions::WordPair, jotoba::search::SearchTarget}; pub fn suggestions(query: &str, search_target: SearchTarget) -> Option> { if query.trim().is_empty() { return Some(empty(search_target)); } let index = indexes::get_suggestions().hashtags(); let res = index.ngram_search(query, &[search_target]); let max = res.first()?.1; let out: Vec<_> = res .into_iter() .filter(|i| i.1 >= max - 0.4) .map(|i| 
WordPair::new(i.0.tag.clone()))
        .collect();

    Some(out)
}

/// Returns the most frequent hashtags for `search_target`; used when the
/// query is empty.
fn empty(search_target: SearchTarget) -> Vec<WordPair> {
    let start = std::time::Instant::now();

    let index = &indexes::get_suggestions().hashtags();
    let ngindex = index.index.deref();

    let mut out: Vec<_> = ngindex
        .storage()
        .iter()
        .map(|i| index.get(i.into_item() as usize).unwrap())
        .filter(|i| i.s_targets.contains(&search_target))
        .collect();

    // Most frequent tags first.
    out.sort_by(|a, b| a.freq.total_cmp(&b.freq).reverse());

    let res = out
        .into_iter()
        .take(10)
        .map(|i| WordPair::new(i.tag.clone()))
        .collect();

    // NOTE(review): was a bare `println!`; routed through logging like the
    // timing logs in the sibling modules.
    log::debug!("took: {:?}", start.elapsed());

    res
}
================================================
FILE: lib/api/src/app/completions/words/kana_end_ext.rs
================================================
use autocompletion::{
    index::{
        japanese::{Item, JapaneseIndex},
        IndexItem,
    },
    relevance::{item::EngineItem, RelevanceCalc},
    suggest::{
        extension::{Extension, ExtensionOptions},
        query::SuggestionQuery,
    },
};
use jp_utils::JapaneseExt;
use priority_container::PrioContainerMax;

#[derive(Clone, Copy)]
pub struct KanaEndExtension<'a> {
    pub options: ExtensionOptions,
    index: &'a JapaneseIndex,
    max_dist: u32,
}

impl<'a> KanaEndExtension<'a> {
    /// Create a new Longest-Prefix Extension
    pub fn new(index: &'a JapaneseIndex, max_dist: u32) -> Self {
        let mut options = ExtensionOptions::default();
        options.weights.freq_weight = 0.01;
        Self {
            options,
            index,
            max_dist,
        }
    }
}

impl<'a> Extension<'a> for KanaEndExtension<'a> {
    #[inline]
    fn run(&self, query: &SuggestionQuery, rel_weight: f64) -> Vec<EngineItem<'a>> {
        let query_str = &query.query_str;

        // Guard against an empty query instead of panicking on `unwrap()`.
        let first_char = match query_str.chars().next() {
            Some(c) => c,
            None => return vec![],
        };
        let last_char = match query_str.chars().last() {
            Some(c) => c,
            None => return vec![],
        };

        // Only applies to queries of the shape <kanji...><kana...>.
        if !first_char.is_kanji() || !last_char.is_kana() {
            return vec![];
        }

        let mut parts: Vec<_> = jp_utils::tokenize::by_alphabet(&query_str, true)
            .filter(|i| !i.trim().is_empty())
            .collect();
        if parts.len() != 2 {
            return vec![];
        }

        let kanji_part = parts.remove(0);
        let kana_part = parts.remove(0);

        let kana_hash = jpeudex::Hash::new(kana_part);

        let rel_weight = rel_weight * self.options.weights.total_weight;
        let mut out = PrioContainerMax::new(self.options.limit);
        let rel_calc = RelevanceCalc::new(self.options.weights).with_total_weight(rel_weight);

        // Walk all words whose kanji reading starts with the query's kanji part.
        let items = self.index.trie.iter_prefix_str(kanji_part);
        for j in items.map(|i| i.1).flatten() {
            let word = self.index.get_item(*j);
            if word.kanji.is_none() {
                continue;
            }

            let similarity = match word_similarity(word, kanji_part, kana_part, &kana_hash) {
                Some(s) => s,
                None => continue,
            };
            if similarity > self.max_dist {
                continue;
            }

            let mut item = word.into_engine_item();
            let str_rel = item.inner().str_relevance(&query.query_str);
            let rel = rel_calc.calc(&item, str_rel);
            item.set_relevance(rel);
            out.insert(item);
        }

        let out = out.into_iter().map(|i| i.0).collect::<Vec<_>>();
        let rel_calc = RelevanceCalc::new(self.options.weights).with_total_weight(rel_weight);
        query.order_items(out, rel_calc)
    }

    #[inline]
    fn should_run(&self, already_found: usize, _query: &SuggestionQuery) -> bool {
        self.options.enabled && already_found < self.options.threshold
    }

    #[inline]
    fn get_options(&self) -> &ExtensionOptions {
        &self.options
    }
}

/// Computes a distance between `item` and the query parts; smaller is closer.
/// Returns `None` when no sensible similarity can be computed.
/// Callers must ensure `item.kanji` is `Some` (checked in `run`).
#[inline]
fn word_similarity(
    item: &Item,
    kanji: &str,
    kana: &str,
    kana_hash: &Option<jpeudex::Hash>,
) -> Option<u32> {
    let item_kanji = item.kanji.as_ref().unwrap();

    // Exact kanji-prefix + kana-suffix match.
    if item.kana.ends_with(kana) && item_kanji.starts_with(kanji) {
        return Some(0);
    }

    // Kana substring match: penalize by the amount of trailing kana beyond it.
    if let Some(found_sub) = find_kana_str(&item.kana, kana) {
        let item_part = &item.kana[found_sub..];
        let l = item_part.chars().count();
        let kana_len = kana.chars().count();
        // `item_part` starts with the match, so `l >= kana_len` always holds.
        return Some((l - kana_len) as u32 * 2);
    }

    // Fall back to phonetic distance.
    if let Some(kana_hash) = &kana_hash {
        let item_kana_hash = jpeudex::Hash::new(&item.kana)?;
        let dist = (item_kana_hash - *kana_hash).dist();
        return Some(dist);
    }

    None
}

/// Requires `full_kana` to be longer than `end_sub`
fn find_kana_str(full_kana: &str, end_sub: &str) -> Option<usize> {
    full_kana.match_indices(end_sub).last().map(|i| i.0)
}
================================================
FILE: 
lib/api/src/app/completions/words/mod.rs
================================================
pub mod foreign;
pub mod hashtag;
pub mod kana_end_ext;
pub mod native;

use std::{cmp::Ordering, time::Instant};

use jp_utils::JapaneseExt;
use search::query::{Query, QueryLang};
use types::api::app::completions::{Response, WordPair};
use utils::bool_ord;
use wana_kana::{to_katakana::to_katakana, to_romaji::to_romaji};

/// Returns word suggestions based on the query. Applies various approaches to give better results
pub(crate) fn suggestions(query: Query, radicals: &[char]) -> Option<Response> {
    let response = try_word_suggestions(&query, radicals)?;

    // Tries to do a katakana search if nothing was found
    let result = if response.is_empty() && query.query_str.is_hiragana() {
        try_word_suggestions(&get_katakana_query(&query), radicals)?
    } else {
        response
    };

    Some(Response::new(result))
}

/// Returns Ok(suggestions) for the given query ordered and ready to display
fn try_word_suggestions(query: &Query, radicals: &[char]) -> Option<Vec<WordPair>> {
    let start = Instant::now();

    // Get suggestions for matching language
    let word_pairs = match query.q_lang {
        QueryLang::Japanese => {
            // Only the japanese path uses the romaji representation, so only
            // compute it here instead of unconditionally for every query.
            let romaji_query = to_romaji(query.query_str.as_str());
            native::suggestions(&query, &romaji_query, radicals)?
        }
        QueryLang::Foreign | QueryLang::Undetected | QueryLang::Korean => {
            let mut res = foreign::suggestions(&query, &query.query_str).unwrap_or_default();
            // Order: put exact matches to top
            res.sort_by(|a, b| word_pair_order(a, b, &query.query_str));
            res
        }
    };

    log::debug!("Suggestions took: {:?}", start.elapsed());

    Some(word_pairs)
}

/// Ordering for [`WordPair`]s which puts the exact matches to top
#[inline]
fn word_pair_order(a: &WordPair, b: &WordPair, query: &str) -> Ordering {
    bool_ord(a.has_reading(&query), b.has_reading(&query))
}

/// Returns an equivalent katakana query
fn get_katakana_query(query: &Query) -> Query {
    Query {
        query_str: to_katakana(query.query_str.as_str()),
        ..query.clone()
    }
}
================================================ FILE: lib/api/src/app/completions/words/native.rs ================================================ use super::{super::*, kana_end_ext::KanaEndExtension}; use autocompletion::{ index::{str_item::StringItem, IndexItem}, suggest::{ extension::{ kanji_align::KanjiAlignExtension, ngram::NGramExtension, similar_terms::SimilarTermsExtension, }, query::SuggestionQuery, task::SuggestionTask, }, }; use wana_kana::ConvertJapanese; const MAX_SENTENCE_LEN: usize = 15; /// Get suggestions for foreign search input pub fn suggestions(query: &Query, _romaji_query: &str, radicals: &[char]) -> Option> { let jp_engine = indexes::get_suggestions().jp_words(); let query_str = query.query_str.as_str(); let mut suggestion_task = SuggestionTask::new(30); let mut main_sugg_query = SuggestionQuery::new(jp_engine, query_str); main_sugg_query.weights.str_weight = 1.2; // Kanji reading align (くにうた ー> 国歌) let mut k_r_align = KanjiAlignExtension::new(jp_engine); k_r_align.options.weights.freq_weight = 1.0; k_r_align.options.threshold = 5; main_sugg_query.add_extension(k_r_align); // Find 天気予報 even if 天気よほう was written let mut kana_end_ext = KanaEndExtension::new(jp_engine, 10); kana_end_ext.options.weights.total_weight = 0.45; kana_end_ext.options.weights.freq_weight = 0.4; main_sugg_query.add_extension(kana_end_ext); let (norm_form, sentence) = normalize_inflections(query_str); if let Some(normalized) = norm_form { let mut norm_query = SuggestionQuery::new(jp_engine, normalized); norm_query.threshold = 10; norm_query.weights.total_weight = 0.75; norm_query.weights.freq_weight = 0.5; suggestion_task.add_query(norm_query); } // Fix typos let mut ng_ex = NGramExtension::with_sim_threshold(jp_engine, 0.5); ng_ex.options.weights.freq_weight = 0.05; ng_ex.query_weigth = 0.7; //ng_ex.cust_query = Some(&romaji_query); ng_ex.cust_query = Some(query_str.to_owned()); main_sugg_query.add_extension(ng_ex); // Similar terms based on pronounciation let mut ste = 
SimilarTermsExtension::new(jp_engine, 16); ste.options.threshold = 10; ste.options.weights.total_weight = 0.45; ste.options.weights.freq_weight = 0.05; //ste.options.weights.str_weight = 1.4; main_sugg_query.add_extension(ste); suggestion_task.add_query(main_sugg_query); // Add katakana results if query_str.has_kana() { let kanaquery = query_str.to_katakana(); if kanaquery != query_str { let mut kana_query = SuggestionQuery::new(jp_engine, kanaquery); kana_query.weights.total_weight = 0.8; suggestion_task.add_query(kana_query); } } let sentence_len = sentence.len(); let items: Vec<_> = sentence .into_iter() .filter(|i| !i.is_empty()) .map(|w| StringItem::new(w, 0.0)) .collect(); let items: Vec<_> = items .iter() .enumerate() .map(|(pos, i)| { let mut engine_item = i.into_engine_item(); engine_item.set_relevance((sentence_len - pos) as u16); engine_item }) .collect(); if sentence_len > 0 && sentence_len <= MAX_SENTENCE_LEN { suggestion_task.add_custom_entries(items); } // radical filter let word_res = resources::get().words(); suggestion_task.set_filter(move |item| { if radicals.is_empty() { return true; } let word = match word_res.by_sequence(item.word_id()) { Some(word) => word, None => return true, }; word_rad_filter(query_str, word, radicals) }); Some(convert_results(suggestion_task.search())) } pub(crate) fn normalize_inflections(query_str: &str) -> (Option, Vec) { let parse_res = sentence_reader::Parser::new(query_str).parse(); if let sentence_reader::output::ParseResult::InflectedWord(word) = parse_res { return (Some(word.get_normalized()), vec![]); } if let sentence_reader::output::ParseResult::Sentence(sentence) = parse_res { let items: Vec<_> = sentence .iter() .filter_map(|i| { let wc = i.word_class_raw(); if wc.is_space() || wc.is_symbol() || wc.is_particle() { return None; } Some(i.get_normalized()) }) .collect(); return (None, items); } (None, vec![]) } fn word_rad_filter(query: &str, word: &types::jotoba::words::Word, radicals: &[char]) -> bool { let 
kanji = match word.reading.kanji.as_ref() { Some(k) => &k.reading, None => return false, }; let retrieve = resources::get().kanji(); let query_kanji = query.chars().filter(|i| i.is_kanji()).collect::>(); kanji .chars() // Don't apply on existing kanji .filter(|i| !query_kanji.contains(&i)) .filter_map(|k| k.is_kanji().then(|| retrieve.by_literal(k)).flatten()) .any(|k| { if !k.parts.is_empty() { return utils::part_of(radicals, &k.parts); } false }) } ================================================ FILE: lib/api/src/app/details/mod.rs ================================================ pub mod sentences; pub mod word; ================================================ FILE: lib/api/src/app/details/sentences.rs ================================================ use crate::app::{search::sentences::convert_sentence, Result}; use actix_web::web::{Data, Json}; use config::Config; use engine::task::SearchTask; use error::api_error::RestError; use jp_utils::JapaneseExt; use search::{engine::words::native::Engine, word::order::native::NativeOrder}; use sentence_reader::output::ParseResult; use types::{ api::app::{ details::{query::DetailsPayload, sentence}, search::responses::{kanji::Kanji, words::Word}, }, jotoba::{sentences::Sentence, words::filter_languages}, }; pub async fn details_ep( payload: Json, config: Data, ) -> Result> { Ok(Json( sentence_details(&payload, &config).ok_or(RestError::NotFound)?, )) } fn sentence_details(payload: &DetailsPayload, config: &Config) -> Option { let sentence = resources::get().sentences().by_id(payload.sequence)?; let kanji = get_kanji(sentence); let words = get_words(sentence, payload, config); let sentence = search::sentence::result::Sentence::from_m_sentence(sentence, payload.lang_param())?; let sentence = convert_sentence(sentence); Some(sentence::Details::new(sentence, words, kanji)) } fn get_kanji(sentence: &Sentence) -> Vec { let kanji_iter = sentence.japanese.chars().filter(|i| i.is_kanji()); let mut out: Vec = vec![]; for k_lit in 
kanji_iter { if let Some(kanji) = resources::get().kanji().by_literal(k_lit) { out.push(kanji.to_owned().into()); } } out } fn get_words(sentence: &Sentence, payload: &DetailsPayload, config: &Config) -> Vec { let parsed = sentence_reader::Parser::new(&sentence.japanese).parse(); match parsed { ParseResult::Sentence(s) => s .iter() .map(|i| i.get_normalized()) .filter_map(|i| find_word(&i, payload, config)) .collect::>(), ParseResult::InflectedWord(i) => find_word(&i.get_normalized(), payload, config) .map(|i| vec![i]) .unwrap_or_default(), ParseResult::None => vec![], } } fn find_word(w: &str, payload: &DetailsPayload, config: &Config) -> Option { let mut task = SearchTask::::new(w) .with_limit(4) .with_threshold(0.8) .with_custom_order(NativeOrder::new(w.to_string())); let res = task.find(); if res.len() == 0 { return None; } let mut word = vec![res.into_inner().remove(0).item.clone()]; filter_languages(word.iter_mut(), payload.lang_param()); let word = super::super::conv_word(word.remove(0), payload.language, config); Some(word) } ================================================ FILE: lib/api/src/app/details/word.rs ================================================ use crate::app::Result; use actix_web::web::{Data, Json}; use config::Config; use error::api_error::RestError; use jp_utils::JapaneseExt; use types::{ api::app::{ details::{ query::DetailsPayload, word::{self, TransitivityPair}, }, search::responses::{kanji::Kanji, words::Word}, }, jotoba::language::Language, }; pub async fn details( payload: Json, config: Data, ) -> Result> { Ok(Json( Details::new(&payload) .ok_or(RestError::NotFound)? 
.get_details(&config), )) } pub(crate) struct Details<'a> { payload: &'a DetailsPayload, word: &'static types::jotoba::words::Word, } impl<'a> Details<'a> { #[inline] fn new(payload: &'a DetailsPayload) -> Option { let word = resources::get().words().by_sequence(payload.sequence)?; Some(Details { payload, word }) } fn get_details(&self, config: &Config) -> word::Details { let kanji = self.get_kanji(); let has_sentence = self.has_sentence(); let transitivity_pair = self.transitivity_pair(); let collocations = self.get_collocations(config); let inflection_table = self.word.get_inflections(); let word = self.get_word(config); word::Details::new( word, kanji, inflection_table, collocations, has_sentence, transitivity_pair, ) } fn get_kanji(&self) -> Vec { let retrieve = resources::get().kanji(); self.word .get_reading() .reading .chars() .filter_map(|i| i.is_kanji().then(|| i).and_then(|k| retrieve.by_literal(k))) .map(|i| (*i).clone().into()) .collect::>() } #[inline] fn has_sentence(&self) -> bool { self.word.has_sentence(self.payload.language) || (self.payload.show_english && self.word.has_sentence(Language::English)) } fn transitivity_pair(&self) -> Option { if let Some(trans) = self.word.transive_version { return Some(TransitivityPair::Transitive(trans.get())); } if let Some(intrans) = self.word.intransive_version { return Some(TransitivityPair::Intransitive(intrans.get())); } None } fn get_collocations(&self, config: &Config) -> Vec { let collocations = match &self.word.collocations { Some(colloc) => colloc, None => return vec![], }; let retrieve = resources::get().words(); collocations .iter() .filter_map(|i| { let word = retrieve.by_sequence(*i)?; Some(self.format_word(word, config)) }) .collect() } #[inline] fn get_word(&self, config: &Config) -> Word { self.format_word(self.word, config) } #[inline] fn format_word(&self, word: &types::jotoba::words::Word, config: &Config) -> Word { let mut word = word.clone(); word.adjust_language(self.payload.lang_param()); 
crate::app::conv_word(word, self.payload.language, config) } } ================================================ FILE: lib/api/src/app/img/mod.rs ================================================ #![allow(unused)] #[cfg(feature = "img_scan")] pub mod request; use actix_multipart::Multipart; use actix_web::web::{self, Json}; use config::Config; use error::api_error::RestError; use itertools::Itertools; use once_cell::sync::Lazy; use regex::Regex; use std::path::Path; use types::api::app::image::{Request, Response}; // MAX 2MB const MAX_UPLOAD_SIZE: usize = 2 * 1024 * 1024; // Filter japanese from image text const FILTER_JP_REGEX: Lazy = Lazy::new(|| Regex::new("[あ-ん一-龯一-龯0-9A-zア-ン』『]").unwrap()); /// Get search suggestions endpoint pub async fn scan_ep( payload: Multipart, args: web::Query, config: web::Data, ) -> Result, actix_web::Error> { // Load payload let local_file = request::read_payload(&config, payload).await?; // Scan image let local_file_cloned = local_file.clone(); let res = web::block(move || scan_image(local_file_cloned, &args, &config)).await; // Cleanup file web::block(move || std::fs::remove_file(local_file)).await??; // Handle result after cleaning up files Ok(Json(res??)) } /// Scans an image and returns a `Response` with the recognized text or an error #[cfg(feature = "img_scan")] fn scan_image>( file: P, req: &Request, config: &Config, ) -> Result { let tess_data = config.server.tess_data.as_ref().map(|i| i.as_str()); let mut lt = leptess::LepTess::new(tess_data, "jpn").map_err(|_| RestError::Internal)?; lt.set_image(file).map_err(|_| RestError::NoTextFound)?; if lt.get_source_y_resolution() <= 0 { lt.set_source_resolution(70) } if lt.mean_text_conf() < req.threshold { return Err(RestError::NoTextFound); } let text = lt .get_utf8_text() .ok() .and_then(|text| format_text(text)) .ok_or(RestError::NoTextFound)?; Ok(Response { text }) } /// Format non-japanese characters from scanned result fn format_text(text: String) -> Option { let modded_text = 
FILTER_JP_REGEX .captures_iter(&text) .into_iter() .map(|i| { i.iter() .filter_map(|j| Some(j?.as_str().to_string())) .collect::>() }) .flatten() .join(""); (!modded_text.is_empty()).then(|| modded_text) } #[cfg(not(feature = "img_scan"))] mod request { use super::*; use std::path::PathBuf; pub(crate) async fn read_payload( config: &Config, mut payload: Multipart, ) -> Result { todo!() } } /// Scans an image and returns a `Response` with the recognized text or an error #[cfg(not(feature = "img_scan"))] fn scan_image>( _file: P, _req: &Request, _config: &Config, ) -> Result { Ok(Response { text: String::from("unsupported"), }) } ================================================ FILE: lib/api/src/app/img/request.rs ================================================ use std::{ convert::TryInto, fs::{create_dir, File}, io::Write, path::{Path, PathBuf}, }; use actix_multipart::{Field, Multipart}; use actix_web::web; use config::Config; use error::api_error::{Origin, RestError}; use futures::{StreamExt, TryStreamExt}; use super::MAX_UPLOAD_SIZE; /// Reads, validates and stores a multipart for img_scan endpoint requests pub(crate) async fn read_payload( config: &Config, mut payload: Multipart, ) -> Result { // Generate file let rand_file = gen_local_file(config).await?; // Get first payload let field = payload .try_next() .await .ok() .flatten() .ok_or(RestError::Missing(Origin::File))?; // Read payload into file read_field(field, &rand_file).await?; Ok(rand_file) } async fn gen_local_file(config: &Config) -> Result { let path = config.get_img_scan_upload_path(); let rand_file = Path::new(&path); if !rand_file.exists() { let path = config.get_img_scan_upload_path(); web::block(move || create_dir(Path::new(&path))).await??; } Ok(rand_file.join(format!("{}_img_scan", utils::rand_alpha_numeric(75)))) } async fn read_field(mut field: Field, local_file: &PathBuf) -> Result<(), RestError> { let local_file_cloned = local_file.clone(); let mut local_file = web::block(move || 
File::create(&local_file_cloned)).await??; // Whether the magic number has been verified or not let mut verified = false; // The current amount of uploaded bytes let mut size = 0; while let Some(chunk) = field .next() .await .map(|i| i.map_err(|_| RestError::IoError)) { let chunk = chunk?; size += chunk.len(); if size > MAX_UPLOAD_SIZE { return Err(RestError::BadRequest.into()); } if !verified { check_magic_bytes(&chunk)?; verified = true; } local_file.write_all(&chunk)?; } Ok(()) } /// Verifies the input files magic number fn check_magic_bytes(chunk: &[u8]) -> Result<(), RestError> { let magic_bytes: [u8; 4] = chunk[0..4].try_into().map_err(|_| RestError::BadRequest)?; if !is_supported_format(magic_bytes) { return Err(RestError::FormatNotSupported.into()); } Ok(()) } /// Returns `true` if given magic_nr bytes represent a supported image format #[inline] fn is_supported_format(magic_nr: [u8; 4]) -> bool { match magic_nr { // JPG [255, 216, 255, 224] => true, // PNG [137, 80, 78, 71] => true, _ => false, } } ================================================ FILE: lib/api/src/app/kanji/ids_tree/builder.rs ================================================ use ids_parser::Origin; use once_cell::sync::Lazy; use std::collections::HashSet; use types::api::app::kanji::ids_tree::OutObject; static STOP_RADICALS: Lazy> = Lazy::new(|| { japanese::radicals::RADICALS .iter() .map(|i| i.1) .flatten() .map(|i| i.chars().next().unwrap()) .collect() }); pub struct KanjiTreeBuilder { build_full: bool, } impl KanjiTreeBuilder { /// Creates a new TreeBuilder. 
The parameter specifies whether a full tree should be built or
    /// Only one which is restricted to the Radicals used in the radical picker
    pub fn new(build_full: bool) -> Self {
        Self { build_full }
    }

    /// Recursive method to build the OutObjects
    ///
    /// Walks the IDS decomposition of `c` depth-first and returns the
    /// resulting sub-tree; `None` if no decomposition data exists for `c`.
    pub fn build(&self, c: char) -> Option {
        let retrieve = resources::get().kanji();
        let ids_kanji = retrieve.ids(c)?;

        let mut out = OutObject::new(c);
        out.set_literal_available(retrieve.has_literal(c));

        //let radicals = ids_kanji.comp_by_lang(Origin::Japan)?.get_radicals();
        // Prefer the japanese composition; fall back to the composition only
        // if exactly one exists, otherwise give up on this node.
        let comps = match ids_kanji.comp_by_lang(Origin::Japan) {
            Some(s) => s,
            None => {
                if ids_kanji.compositions.len() == 1 {
                    &ids_kanji.compositions[0]
                } else {
                    return None;
                }
            }
        };

        let radicals = comps.get_radicals();

        // recursive exit condition: self-referencing or empty decomposition,
        // or a picker stop-radical when only the restricted tree is wanted.
        if (radicals.len() == 1 && radicals[0] == c)
            || radicals.is_empty()
            || (STOP_RADICALS.contains(&c) && !self.build_full)
        {
            return Some(out);
        }

        // Recurse into each radical exactly once (compositions may repeat parts).
        let mut visited_items = HashSet::with_capacity(radicals.len());
        for radical in radicals {
            if visited_items.contains(&radical) {
                continue;
            }
            if let Some(child) = self.build(radical) {
                out.add_child(child);
            }
            visited_items.insert(radical);
        }

        Some(out)
    }
}
================================================
FILE: lib/api/src/app/kanji/ids_tree/mod.rs
================================================
pub mod builder;

use actix_web::web::Json;
use builder::KanjiTreeBuilder;
use error::api_error::RestError;
use types::api::app::kanji::ids_tree::{Request, Response};

/// Get a decomposition graph
///
/// Builds the requested tree and the opposite-size variant; `has_big` tells
/// the client whether the two differ (i.e. another tree size exists).
pub async fn decomp_graph(payload: Json) -> Result, RestError> {
    let tree = KanjiTreeBuilder::new(payload.full)
        .build(payload.literal)
        .ok_or(RestError::NotFound)?;

    let size_opposite = KanjiTreeBuilder::new(!payload.full)
        .build(payload.literal)
        .ok_or(RestError::NotFound)?;
    let has_big = tree != size_opposite;

    Ok(Json(Response::new(tree, has_big)))
}
================================================
FILE: lib/api/src/app/kanji/mod.rs
================================================
pub mod ids_tree;
================================================ FILE: lib/api/src/app/mod.rs ================================================ pub mod completions; pub mod details; pub mod img; pub mod kanji; pub mod news; pub mod radical; pub mod search; use std::path::Path; use config::Config; use error::api_error::RestError; use types::{ api::app::search::responses::words, jotoba::{self, language::Language}, }; pub type Result = std::result::Result; pub(crate) fn conv_word(word: jotoba::words::Word, lang: Language, config: &Config) -> words::Word { let is_common = word.is_common(); let accents = word.get_pitches(); let audio = word.audio_file_name().and_then(|name| { let audio_p = Path::new("mp3").join(name); let local_path = Path::new(config.server.get_audio_files()).join(&audio_p); if local_path.exists() { let url = Path::new("/audio/") .join(&audio_p) .to_str() .unwrap() .to_string(); Some(url) } else { None } }); let reading = word .furigana .as_ref() .map(|i| i.clone()) .unwrap_or(word.get_reading().reading.clone()); let alt_readings = word .reading .alternative .into_iter() .map(|i| i.reading) .collect(); let senses = word .senses .into_iter() .map(|i| conv_ex_sentence(i, lang)) .collect::>(); words::Word { sequence: word.sequence, is_common, reading, alt_readings, senses, accents, jlpt_lvl: word.jlpt_lvl.map(|i| i.get()), furigana: word.furigana, transive_version: word.transive_version.map(|i| i.get()), intransive_version: word.intransive_version.map(|i| i.get()), sentences_available: word.sentences_available, audio, } } #[inline] pub fn conv_ex_sentence(sense: jotoba::words::sense::Sense, lang: Language) -> words::Sense { let glosses = sense .glosses .into_iter() .map(|i| i.gloss) .collect::>(); let example_sentence = sense .example_sentence .and_then(|i| get_example_sentence(i, lang)); words::Sense { misc: sense.misc, field: sense.field, dialect: sense.dialect, glosses, xref: sense.xref, antonym: sense.antonym, information: sense.information, part_of_speech: 
sense.part_of_speech, language: sense.language, example_sentence, gairaigo: sense.gairaigo, } } fn get_example_sentence(id: u32, language: Language) -> Option<(String, String)> { let sentence = resources::get().sentences().by_id(id)?; let translation = sentence .translation_for(language) .or_else(|| sentence.translation_for(Language::English))?; Some((sentence.furigana.clone(), translation.to_string())) } ================================================ FILE: lib/api/src/app/news/detailed.rs ================================================ use actix_web::web::Json; use error::api_error; use types::api::app::news::long::{Request, Response}; /// Get detailed news endpoint pub async fn news(payload: Json) -> Result, actix_web::Error> { let id = payload.id; let entry = news::get() .by_id(id) .map(|i| super::ne_from_resource(i, false)) .ok_or(api_error::RestError::NotFound)?; Ok(Json(Response { entry })) } ================================================ FILE: lib/api/src/app/news/mod.rs ================================================ pub mod detailed; pub mod short; use types::api::app::news::NewsEntry; fn ne_from_resource(src: &news::NewsEntry, short: bool) -> NewsEntry { let html = if short { src.short.clone() } else { src.long.clone() }; NewsEntry { id: src.id, html, title: src.title.clone(), creation_time: src.creation_time, trimmed: src.was_trimmed && !short, } } ================================================ FILE: lib/api/src/app/news/short.rs ================================================ use actix_web::web::Json; use types::api::app::news::short::{Request, Response}; /// Get short news endpoint pub async fn news(payload: Json) -> Result, actix_web::Error> { let after = payload.after; let entries = news::get() .last_entries(3) .filter(|i| i.creation_time > after) .map(|i| super::ne_from_resource(i, true)) .collect::>(); Ok(Json(Response { entries })) } ================================================ FILE: lib/api/src/app/radical/kanji.rs 
================================================
use actix_web::web::Json;
use intmap::{int_set::IntSet, IntMap};
use std::{collections::HashMap, time::Instant};
use types::api::app::radical::find_kanji::{Request, Response};

/// Get kanji by its radicals
pub async fn kanji_by_radicals(payload: Json<Request>) -> Result<Json<Response>, actix_web::Error> {
    let start = Instant::now();
    let res = find_kanji(&payload.radicals);
    log::debug!("Radical results took: {:?}", start.elapsed());
    Ok(Json(res))
}

/// Finds all kanji built from `rads` and collects which further radicals are
/// still selectable, both grouped by stroke count.
pub fn find_kanji(rads: &[char]) -> Response {
    let mut possible_rads_set = IntSet::with_capacity(rads.len() * 3);
    // Matching kanji, keyed by their stroke count.
    let mut kanji_res: IntMap<Vec<char>> = IntMap::with_capacity(8);

    let k_retrieve = resources::get().kanji();

    for kanji in k_retrieve.by_radicals(rads) {
        push_or_insert(&mut kanji_res, kanji.stroke_count as u32, kanji.literal);

        if !kanji.parts.is_empty() {
            possible_rads_set.reserve(kanji.parts.len());
            possible_rads_set.extend(kanji.parts.iter().map(|i| *i as u32));
        }
    }

    let mut possible_rads = HashMap::<u32, Vec<char>>::new();
    for i in possible_rads_set {
        // The set only contains values produced by `char as u32`, so this
        // cannot fail; use the checked conversion instead of `unsafe` anyway.
        let c = match char::from_u32(i) {
            Some(c) => c,
            None => continue,
        };
        // Skip radicals with unknown stroke count instead of panicking.
        let s_count = match japanese::radicals::get_stroke_count(c) {
            Some(s) => s,
            None => continue,
        };
        possible_rads.entry(s_count as u32).or_default().push(c);
    }

    // Sort all radicals
    for (_, v) in possible_rads.iter_mut() {
        v.sort_unstable();
    }

    let mut kanji_res2 = HashMap::<u32, Vec<char>>::with_capacity(kanji_res.len());
    kanji_res2.extend(kanji_res);

    Response {
        possible_radicals: possible_rads,
        kanji: kanji_res2,
    }
}

/// Pushes `item` onto the Vec at `key`, creating it with a stroke-count based
/// capacity heuristic when missing.
fn push_or_insert<T>(map: &mut IntMap<Vec<T>>, key: u32, item: T) {
    if let Some(s) = map.get_mut(key) {
        s.push(item);
        return;
    }

    // Lower stroke counts have more kanji, so reserve more space for them.
    let capacity = (25u32.saturating_sub(key) + 1) * 2;
    let mut new_vec = Vec::with_capacity(capacity as usize);
    new_vec.push(item);
    map.insert(key, new_vec);
}
================================================
FILE: lib/api/src/app/radical/mod.rs
================================================
pub mod kanji;
pub mod search;

pub use kanji::kanji_by_radicals;
================================================
FILE: 
lib/api/src/app/radical/search/jp_search.rs ================================================ use jp_utils::JapaneseExt; use search::radical::word::RomajiSearch; use std::collections::{HashMap, HashSet}; use types::{api::app::radical::search::KanjiRads, jotoba::kanji::Kanji}; /// Returns a list of radicals based on the radical-search `query` pub fn search(query: &str) -> HashSet { if query.has_kanji() { return kanji_search(query); } RomajiSearch::new(query).run() } /// Returns a List of kanji that use similar radicals as the query. pub fn similar_kanji_search(query: &str) -> Vec { let kanji = query .chars() .filter(|i| i.is_kanji()) .filter_map(|lit| get_kanji(lit)); let mut dups: HashSet = HashSet::new(); let mut out: Vec = Vec::new(); for kanji in kanji { // Add written kanji to the result too out.push(into_kanji_rads(kanji)); dups.insert(kanji.literal); for part in kanji.parts.iter() { let mut kanji_w_r = resources::get().kanji().by_radicals(&[*part]); kanji_w_r.sort_by(|a, b| a.stroke_count.cmp(&b.stroke_count)); for k in kanji_w_r.into_iter().take(10) { if k.stroke_count < kanji.stroke_count || dups.contains(&k.literal) { continue; } dups.insert(k.literal); out.push(into_kanji_rads(k)); } } } out.truncate(50); out } #[inline] fn get_kanji(lit: char) -> Option<&'static Kanji> { resources::get().kanji().by_literal(lit) } /// Convert a kanji to a `KanjiRads` fn into_kanji_rads(kanji: &Kanji) -> KanjiRads { let mut rads: HashMap> = HashMap::with_capacity(kanji.parts.len()); for part in &kanji.parts { let stroke_count = japanese::radicals::get_stroke_count(*part); if let Some(stroke_count) = stroke_count { rads.entry(stroke_count).or_default().push(*part); } } KanjiRads::new(kanji.literal, rads) } /// Takes all kanji from `query` and returns a list of all unique radicals to build all kanji /// picked from `query` #[inline] fn kanji_search(query: &str) -> HashSet { query.chars().map(|k| kanji_radicals(k)).flatten().collect() } #[inline] fn kanji_radicals(kanji: char) 
-> Vec { get_kanji(kanji) .map(|i| i.parts.clone()) .unwrap_or_default() } /* /// Does a kana word-search and returns some likely radicals for the given query fn kana_search(query: &str) -> HashSet { let mut search_task: SearchTask = SearchTask::new(&query) .with_limit(3) .with_threshold(0.8) .with_custom_order(NativeOrder::new(query.to_string())); search_task .find() .into_iter() .map(|i| i.get_reading().reading.chars().filter(|i| i.is_kanji())) .flatten() .unique() .map(|kanji| kanji_radicals(kanji)) .flatten() .take(10) .collect() } */ ================================================ FILE: lib/api/src/app/radical/search/meaning.rs ================================================ use std::collections::HashSet; use japanese::ToKanaExt; use types::jotoba::language::Language; pub fn search(query: &str, language: Language) -> HashSet { if query.len() < 2 { return HashSet::new(); } let mut res = search::radical::meaning_search(query); if res.len() > 4 { return res; } if japanese::guessing::could_be_romaji(query) { res.extend(super::jp_search::search(&query.to_hiragana())); } else { //res.extend(word_search(query, language)); let fw_search = search::radical::word::ForeignSearch::new(query, language); res.extend(fw_search.run()) } res } ================================================ FILE: lib/api/src/app/radical/search/mod.rs ================================================ mod jp_search; mod meaning; use std::{ collections::{BTreeSet, HashMap, HashSet}, str::FromStr, }; use actix_web::{web::Json, HttpRequest}; use error::api_error::RestError; use jp_utils::JapaneseExt; use types::{ api::app::radical::search::{Request, Response}, jotoba::language::Language, }; /// Search for radicals pub async fn search_radical( mut payload: Json, request: HttpRequest, ) -> Result, actix_web::Error> { verify_payload(&mut payload)?; let rad_res; let mut kanji_res = vec![]; if payload.query.is_japanese() { rad_res = jp_search::search(&payload.query); kanji_res = 
jp_search::similar_kanji_search(&payload.query); } else { rad_res = meaning::search(&payload.query, user_lang(&request)); } if rad_res.is_empty() && kanji_res.is_empty() { return Ok(Json(Response::default())); } let radicals = map_radicals(&rad_res); Ok(Json(Response { radicals, kanji: kanji_res, })) } /// Load the users language from cookies #[inline] fn user_lang(request: &HttpRequest) -> Language { request .cookie("default_lang") .and_then(|i| Language::from_str(i.value()).ok()) .unwrap_or_default() } /// Maps radicals by its literals to ResRadical with its stroke count fn map_radicals(inp: &HashSet) -> HashMap> { let mut radicals: HashMap> = HashMap::with_capacity(inp.len()); for (lit, strokes) in inp .iter() .filter_map(|lit| Some((*lit, japanese::radicals::get_stroke_count(*lit)?))) { radicals.entry(strokes as u8).or_default().insert(lit); } radicals } /// Verifies the payload itself and returns a proper error if the request is invalid fn verify_payload(payload: &mut Request) -> Result<(), RestError> { if payload.query.trim().is_empty() { return Err(RestError::BadRequest); } payload.query = payload.query.trim().to_string(); Ok(()) } ================================================ FILE: lib/api/src/app/search/kanji.rs ================================================ use super::new_page; use super::convert_payload; use crate::app::Result; use actix_web::web::{self, Json}; use error::api_error::RestError; use types::jotoba::language::param::AsLangParam; use types::{ api::app::search::{ query::SearchPayload, responses::{ k_compounds::{CompoundResponse, CompoundSet, CompoundWord}, kanji, Response, }, }, jotoba::{ search::SearchTarget, words::{filter_languages, Word}, }, }; /// API response type pub type SearchResp = Response; /// Do an app kanji search via API pub async fn search(payload: Json) -> Result> { let query = convert_payload(&payload) .parse() .ok_or(RestError::BadRequest)?; let query_c = query.clone(); let result = web::block(move || 
search::kanji::search(&query_c)).await??; let items = result .items .into_iter() .map(|i| { let k: kanji::Kanji = i.kanji.into(); k }) .collect::>(); let len = result.total_len as u32; let kanji = kanji::KanjiResponse::new(items); let page = new_page(&payload, kanji, len, payload.settings.page_size); Ok(Json(super::new_response(page, SearchTarget::Kanji, &query))) } /// Kanji compound request pub async fn reading_compounds(payload: Json) -> Result> { let lang = payload.lang_param(); let compounds: Vec<_> = payload .query_str .chars() .filter_map(|i| resources::get().kanji().by_literal(i)) .map(|i| { let on_words = convert_dicts(&i.on_dicts, lang); /* FIX: kun compounds were previously loaded from `on_dicts` (copy-paste bug), which made both compound sets identical; load them from `kun_dicts` instead */ let kun_words = convert_dicts(&i.kun_dicts, lang); CompoundSet::new(on_words, kun_words) }) .collect(); Ok(Json(CompoundResponse::new(compounds))) } /* Converts dictionary sequence ids to `CompoundWord`s in the requested language */ #[inline] fn convert_dicts(dicts: &Vec, lang: impl AsLangParam) -> Vec { load_dicts(dicts, lang) .into_iter() .map(|j| CompoundWord::from_word(&j)) .collect::>() } #[inline] fn load_dicts(dicts: &Vec, lang: impl AsLangParam) -> Vec { let word_storage = resources::get().words(); let mut words: Vec<_> = dicts .iter() .filter_map(|j| word_storage.by_sequence(*j)) .cloned() .collect(); filter_languages(words.iter_mut(), lang); words } ================================================ FILE: lib/api/src/app/search/mod.rs ================================================ pub mod kanji; pub mod names; pub mod sentences; pub mod words; use search::{ query::UserSettings, query::{parser::QueryParser, Query}, }; use serde::Serialize; use types::{ api::app::search::{query::SearchPayload, responses::Response}, jotoba::{ pagination::{page::Page, Pagination}, search::SearchTarget, }, }; const FIRST_PAGE: u32 = 1; const LAST_PAGE: u32 = 100; pub(crate) fn new_response( page: Page, q_type: SearchTarget, query: &Query, ) -> Response { Response::with_help_fn(page, |p| { if !p.is_empty() { return None; } search::build_help(q_type, &query) }) } pub(crate) fn new_page( pl: &SearchPayload,
v: V, items: u32, items_per_page: u32, ) -> Page { let current_page = if items > 0 { (pl.page.unwrap_or(FIRST_PAGE)).max(FIRST_PAGE) } else { 0 }; let mut pagination = Pagination::new_page(v, current_page, items, items_per_page, LAST_PAGE); if items == 0 { pagination.set_pages(0); } pagination } pub(crate) fn convert_payload(pl: &SearchPayload) -> QueryParser { let user_settings = convert_user_settings(&pl.settings); let mut q_parser = QueryParser::new( pl.query_str.clone(), types::jotoba::search::SearchTarget::Kanji, user_settings, ) .with_page(pl.page.unwrap_or_default() as usize) .with_word_index(pl.word_index.unwrap_or_default()); if let Some(lang) = pl.lang_overwrite { q_parser = q_parser.with_lang_overwrite(lang); } q_parser } pub(crate) fn convert_user_settings( settings: &types::api::app::search::query::UserSettings, ) -> UserSettings { UserSettings { user_lang: settings.user_lang, show_english: settings.show_english, english_on_top: true, page_size: settings.page_size, show_example_sentences: settings.show_example_sentences, sentence_furigana: settings.sentence_furigana, ..Default::default() } } ================================================ FILE: lib/api/src/app/search/names.rs ================================================ use super::new_page; use super::convert_payload; use crate::app::Result; use actix_web::web::{self, Json}; use error::api_error::RestError; use search::SearchExecutor; use types::{ api::app::search::{ query::SearchPayload, responses::{names, Response}, }, jotoba::search::SearchTarget, }; /// API response type pub type Resp = Response; /// Do an app name search via API pub async fn search(payload: Json) -> Result> { let query = convert_payload(&payload) .parse() .ok_or(RestError::BadRequest)?; let query_c = query.clone(); let result = web::block(move || { let search = search::name::Search::new(&query_c); SearchExecutor::new(search).run() }) .await?; let res = names::Response::new(result.items.into_iter().cloned().collect()); let len 
= result.total as u32; let page = new_page(&payload, res, len, payload.settings.page_size); let res = super::new_response(page, SearchTarget::Names, &query); Ok(Json(res)) } ================================================ FILE: lib/api/src/app/search/sentences.rs ================================================ use super::new_page; use super::convert_payload; use crate::app::Result; use actix_web::web::{self, Json}; use error::api_error::RestError; use types::{ api::app::search::{ query::SearchPayload, responses::{sentences, Response}, }, jotoba::search::SearchTarget, }; /// API response type pub type Resp = Response; /// Do an app sentence search via API pub async fn search(payload: Json) -> Result> { let query = convert_payload(&payload) .parse() .ok_or(RestError::BadRequest)?; let query_c = query.clone(); let result = web::block(move || { let search = search::sentence::Search::new(&query_c); search::SearchExecutor::new(search).run() }) .await?; let items = result .items .into_iter() .map(|i| convert_sentence(i)) .collect::>(); let res = sentences::Response::new(items); let len = result.total as u32; let page = new_page(&payload, res, len, payload.settings.page_size); let res = super::new_response(page, SearchTarget::Sentences, &query); Ok(Json(res)) } #[inline] pub(crate) fn convert_sentence( sentence: search::sentence::result::Sentence, ) -> sentences::Sentence { sentences::Sentence::new( sentence.id, sentence.furigana.to_string(), sentence.translation.to_string(), ) } ================================================ FILE: lib/api/src/app/search/words.rs ================================================ use super::new_page; use super::convert_payload; use crate::app::Result; use actix_web::web::Data; use actix_web::web::{self, Json}; use config::Config; use error::api_error::RestError; use search::{word::Search, SearchExecutor}; use types::{ api::app::search::{ query::SearchPayload, responses::{ words::{self, Sentence}, Response, }, }, 
jotoba::search::SearchTarget, }; /// API response type pub type Resp = Response; /// Do an app word search via API pub async fn search(payload: Json, config: Data) -> Result> { let query = convert_payload(&payload) .parse() .ok_or(RestError::BadRequest)?; let user_lang = query.settings.user_lang; let query_c = query.clone(); let result = web::block(move || { let search = Search::new(&query_c); SearchExecutor::new(search).run() }) .await?; let kanji = search::word::kanji::load_word_kanji_info(&result.items) .into_iter() .map(|i| i.into()) .collect::>(); let words = result .items .iter() .map(|i| super::super::conv_word(i.clone(), user_lang, &config)) .collect::>(); let s_index = result.sentence_index(); let number = result.number.clone(); let sentence = result .other_data .sentence .and_then(|i| i.parts) .map(|i| conv_sentence(i, s_index)); let infl_info = result.other_data.inflection.map(|i| conv_infl_info(i)); let original_query = result.other_data.raw_query.clone(); let res = words::Response::new(words, kanji, infl_info, sentence, original_query, number); let len = result.total as u32; let page = new_page(&payload, res, len, payload.settings.page_size); let res = super::new_response(page, SearchTarget::Words, &query); Ok(Json(res)) } fn conv_sentence(sentence: sentence_reader::Sentence, index: usize) -> Sentence { let parts = sentence .into_parts() .into_iter() .map(|i| i.into()) .collect(); Sentence::new(index, parts) } fn conv_infl_info(infl_info: search::word::result::InflectionInformation) -> words::InflectionInfo { words::InflectionInfo::new(infl_info.inflections, infl_info.lexeme) } ================================================ FILE: lib/api/src/internal/info/mod.rs ================================================ pub mod words; ================================================ FILE: lib/api/src/internal/info/words.rs ================================================ use std::collections::HashSet; use actix_web::{web::Json, HttpResponse}; use 
error::api_error::RestError; use types::{ api::internal::info::words::{Request, Response, WordItem}, jotoba::words::{part_of_speech::PosSimple, Word}, }; /// Handles a word info API request pub async fn word_info(payload: Json) -> Result { let word_retr = resources::get().words(); let items: Vec<_> = payload .ids .iter() .filter_map(|i| word_retr.by_sequence(*i)) .cloned() .map(|mut word| { word.adjust_language(payload.lang_param()); let pos = unique_pos(&word); WordItem { sentences: vec![], audio: word.audio_file_name_old(), word, pos, } }) .collect(); let response = Response { items }; Ok(HttpResponse::Ok().body(bincode::serialize(&response).unwrap())) } fn unique_pos(word: &Word) -> Vec { word.senses() .into_iter() .map(|i| &i.part_of_speech) .flatten() .map(|i| i.to_pos_simple()) .flatten() .collect::>() .into_iter() .collect() } ================================================ FILE: lib/api/src/internal/mod.rs ================================================ pub mod info; ================================================ FILE: lib/api/src/lib.rs ================================================ /// API endpoints for the webapp pub mod app; /// API endpoints for internal communication pub mod internal; /// Search API endpoint pub mod search; ================================================ FILE: lib/api/src/search/kanji/mod.rs ================================================ use actix_web::web::{self, Data, Json}; use config::Config; use types::{ api::search::kanji::{Kanji, Response}, jotoba::search::SearchTarget, }; use super::{Result, SearchRequest}; /// Do a kanji search via API pub async fn kanji_search( payload: Json, config: Data, ) -> Result> { let query = super::parse_query(payload, SearchTarget::Kanji)?; let result = web::block(move || search::kanji::search(&query)) .await?? 
.items; Ok(Json(to_response(result, &config))) } #[inline] fn to_response(items: Vec, config: &Config) -> Response { let kanji = items .into_iter() .map(|i| Kanji::from(&i.kanji, config.server.get_html_files())) .collect(); Response { kanji } } ================================================ FILE: lib/api/src/search/mod.rs ================================================ pub mod kanji; pub mod name; pub mod sentence; pub mod word; use actix_web::web::Json; use error::api_error::RestError; use search::query::{parser::QueryParser, Query, UserSettings}; use types::{api::search::SearchRequest, jotoba::search::SearchTarget}; pub type Result = std::result::Result; pub(crate) fn parse_query(payload: Json, q_type: SearchTarget) -> Result { let settings = UserSettings { user_lang: payload.language, show_english: !payload.no_english, ..UserSettings::default() }; let q_str = payload.query_str.clone(); let query = QueryParser::new(q_str, q_type, settings) .parse() .ok_or(RestError::BadRequest)?; Ok(query) } ================================================ FILE: lib/api/src/search/name/mod.rs ================================================ use actix_web::web::{self, Json}; use search::SearchExecutor; use types::{api::search::name::Response, jotoba::search::SearchTarget}; use super::{Result, SearchRequest}; /// Do a name search via API pub async fn name_search(payload: Json) -> Result> { let query = super::parse_query(payload, SearchTarget::Kanji)?; let result = web::block(move || { let search = search::name::Search::new(&query); SearchExecutor::new(search).run() }) .await?; Ok(Json(result.items.into())) } ================================================ FILE: lib/api/src/search/sentence/mod.rs ================================================ use actix_web::web::{self, Json}; use types::{ api::search::sentence::{Response, Sentence}, jotoba::search::SearchTarget, }; use super::{Result, SearchRequest}; /// Do a Sentence search via API pub async fn sentence_search(payload: Json) 
-> Result> { let query = super::parse_query(payload, SearchTarget::Kanji)?; let result = web::block(move || { let search = search::sentence::Search::new(&query); search::SearchExecutor::new(search).run() }) .await? .items .into_iter() .map(|i| search_to_sentence(i)) .collect::>(); Ok(Json(result.into())) } #[inline] fn search_to_sentence(sentence: search::sentence::result::Sentence) -> Sentence { Sentence { eng: sentence.get_english().map(|i| i.to_owned()), content: sentence.content.to_string(), furigana: sentence.furigana.to_string(), translation: sentence.translation.to_string(), language: sentence.language, } } ================================================ FILE: lib/api/src/search/word/mod.rs ================================================ use super::{Result, SearchRequest}; use actix_web::web::{self, Data, Json}; use config::Config; use search::{word::Search, SearchExecutor}; use types::{ api::search::{ kanji::Kanji, word::{Response, Word}, }, jotoba::search::SearchTarget, }; /// Do a word search via API pub async fn word_search( payload: Json, config: Data, ) -> Result> { let query = super::parse_query(payload, SearchTarget::Words)?; let result = web::block(move || { let search = Search::new(&query); SearchExecutor::new(search).run() }) .await?; let kanji: Vec = search::word::kanji::load_word_kanji_info(&result.items) .into_iter() .map(|i| Kanji::from(&i, config.server.get_html_files())) .collect(); let words: Vec = result.items.into_iter().map(|i| (&i).into()).collect(); Ok(Json(Response::new(words, kanji))) } ================================================ FILE: lib/config/Cargo.toml ================================================ [package] name = "config" version = "0.1.0" authors = ["jojii "] edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] serde = { version = "1.0.171", features = ["derive"] } toml = "0.7.6" sha1 = { git = "https://github.com/mitsuhiko/rust-sha1"} 
================================================ FILE: lib/config/src/lib.rs ================================================ use std::{ fs::DirEntry, io::{BufReader, Read, Write}, time::Duration, }; use serde::{Deserialize, Serialize}; use std::{ fs::{self, File}, path::{Path, PathBuf}, }; #[derive(Serialize, Deserialize, Default, Clone, Debug)] pub struct Config { pub server: ServerConfig, pub sentry: Option, pub search: Option, #[serde(skip)] pub asset_hash: String, } #[derive(Serialize, Deserialize, Clone, Debug)] pub struct ServerConfig { pub html_files: Option, pub audio_files: Option, pub listen_address: String, pub storage_data: Option, pub img_upload_dir: Option, pub tess_data: Option, pub news_folder: Option, pub unidic_dict: Option, pub debug_mode: Option, pub internal_api_key: String, } #[derive(Serialize, Deserialize, Clone, Debug)] pub struct SentryConfig { pub dsn: String, } #[derive(Serialize, Deserialize, Clone, Debug, Default)] pub struct SearchConfig { pub suggestion_sources: Option, pub indexes_source: Option, pub report_queries_after: Option, } impl Config { /// Returns the configured index source files or its default value if not set pub fn get_indexes_source(&self) -> &str { self.search .as_ref() .and_then(|i| i.indexes_source.as_deref()) .unwrap_or("./resources/indexes") } /// Returns the configured suggestion source files or its default value if not set pub fn get_suggestion_sources(&self) -> &str { self.search .as_ref() .and_then(|i| i.suggestion_sources.as_deref()) .unwrap_or("./resources/suggestions") } /// Returns the configured query report timeout pub fn get_query_report_timeout(&self) -> Duration { let timeout = self .search .as_ref() .and_then(|i| i.report_queries_after) .unwrap_or(4); Duration::from_secs(timeout) } /// Returns the configured (or default) path for storage data pub fn get_storage_data_path(&self) -> String { self.server .storage_data .as_ref() .cloned() .unwrap_or_else(|| 
ServerConfig::default().storage_data.unwrap()) } pub fn get_kreading_freq_path(&self) -> String { Path::new(self.get_indexes_source()) .join("kreading_freq_index") .to_str() .unwrap() .to_string() } /// Returns the configured (or default) path for the radical map pub fn get_unidic_dict(&self) -> String { self.server .unidic_dict .as_ref() .cloned() .unwrap_or_else(|| ServerConfig::default().unidic_dict.unwrap()) } /// Returns the configured (or default) path for the radical map pub fn get_img_scan_upload_path(&self) -> String { self.server .img_upload_dir .as_ref() .cloned() .unwrap_or_else(|| ServerConfig::default().img_upload_dir.unwrap()) } /// Returns `true` if system is in debug mode pub fn is_debug(&self) -> bool { self.server.debug_mode.unwrap_or(false) } } impl Default for ServerConfig { #[inline] fn default() -> Self { Self { html_files: Some(String::from("html/assets")), audio_files: Some(String::from("html/audio")), listen_address: String::from("127.0.0.1:8080"), storage_data: Some(String::from("./resources/storage_data")), img_upload_dir: Some(String::from("./img_scan_tmp")), unidic_dict: Some(String::from("./resources/unidic-mecab")), tess_data: None, news_folder: Some(String::from("./resources/news")), debug_mode: Some(false), internal_api_key: "ReplaceMe!!!!".to_string(), } } } impl ServerConfig { pub fn get_audio_files(&self) -> &str { self.audio_files.as_deref().unwrap_or("html/audio") } pub fn get_html_files(&self) -> &str { self.html_files.as_deref().unwrap_or("html/assets") } pub fn get_locale_path(&self) -> &str { "./locales" } pub fn get_news_folder(&self) -> &str { self.news_folder.as_deref().unwrap_or("./resources/news") } } impl Config { /// Create a new config object pub fn new(src: Option) -> Result { let config_file = src .or_else(|| { std::env::var("JOTOBA_CONFIG") .map(|i| Path::new(&i).to_owned()) .ok() }) .unwrap_or(Self::get_config_file()?); let mut config = if !config_file.exists() // Check if file is empty || 
fs::metadata(&config_file).map(|i| i.len()).unwrap_or(1) == 0 { Self::default().save()? } else { let conf_data = fs::read_to_string(&config_file).map_err(|e| e.to_string())?; toml::from_str(&conf_data).map_err(|e| e.to_string())? }; /* // Warn if sentry is configured but feature not enabled #[cfg(not(feature = "sentry_error"))] if let Some(ref sentry) = config.sentry { if !sentry.dsn.is_empty() { warn!("Sentry configured but not available. Build with \"sentry_error\" feature"); } } */ config.asset_hash = variable_asset_hash(&config).map_err(|i| i.to_string())?; Ok(config) } // Save the config fn save(self) -> Result { let config_file = Self::get_config_file()?; let s = toml::to_string_pretty(&self).map_err(|e| e.to_string())?; let mut f = File::create(&config_file).map_err(|e| e.to_string())?; f.write_all(s.as_bytes()).map_err(|e| e.to_string())?; Ok(self) } // Create missing folders and return the config file pub fn get_config_file() -> Result { let conf_dir: PathBuf = Path::new("./").join("data"); if !conf_dir.exists() { fs::create_dir_all(&conf_dir).map_err(|e| e.to_string())?; } Ok(conf_dir.join("config.toml")) } } fn variable_asset_hash(config: &Config) -> std::io::Result { let asset_path = Path::new(config.server.get_html_files()); let js_files = dir_content(&asset_path.join("js"))?; let css_files = dir_content(&asset_path.join("css"))?; let mut files = js_files .into_iter() .chain(css_files.into_iter()) .collect::>(); files.sort_by(|a, b| a.file_name().cmp(&b.file_name())); let mut hash = sha1::Sha1::new(); let mut buf: Vec = vec![0u8; 100]; for file in files { let mut content = BufReader::new(File::open(file)?); loop { let read = content.read(&mut buf[..])?; if read == 0 { break; } hash.update(&buf[..read]); } } Ok(hash.digest().to_string()) } fn dir_content(path: &Path) -> std::io::Result> { let mut files = Vec::new(); visit_dirs(path, &mut files)?; Ok(files.into_iter().map(|i| i.path()).collect::>()) } fn visit_dirs(dir: &Path, out: &mut Vec) -> 
std::io::Result<()> { if dir.is_dir() { for entry in std::fs::read_dir(dir)? { let entry = entry?; let path = entry.path(); if path.is_dir() { visit_dirs(&path, out)?; } else { out.push(entry) } } } Ok(()) } ================================================ FILE: lib/engine/Cargo.toml ================================================ [package] name = "engine" version = "0.1.0" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] types = { path = "../types", default-features = false, features = [ "jotoba_intern", ] } #priority_container = { path = "../../../priority_container" } priority_container = { git = "https://github.com/JojiiOfficial/PrioContainer/" } order_struct = { git = "https://github.com/JojiiOfficial/OrderStruct" } #index_framework = { path = "../../../index_framework"} index_framework = { git = "https://github.com/WeDontPanic/index_framework" } #vsm = { path = "../../../vsm" } sparse_vec = { git = "https://github.com/JojiiOfficial/SparseVec"} ================================================ FILE: lib/engine/src/lib.rs ================================================ pub mod pushable; pub mod relevance; pub mod result; pub mod task; pub mod utils; use index_framework::{ retrieve::{retriever::Retriever, Retrieve}, traits::{backend::Backend, deser::DeSer}, }; use std::hash::Hash; use types::jotoba::language::Language; /// Generic search engine pub trait Engine<'index> { // Index type B: Backend; // Index dictionary term type DictItem: DeSer + Ord + From; /// Index output type Document: DeSer; /// Retrieving algorithm type Retriever: Retriever< 'index, Self::B, Self::DictItem, Self::Document, Output = Self::Document, >; /// Engine output type Output: Eq + Hash + Clone; /// The search query type Query; fn make_query>(inp: S, lang: Option) -> Option; /// Converts index output to engine output fn doc_to_output(input: &Self::Document) -> Option>; /// Returns the engines index fn 
get_index(lang: Option) -> &'index Self::B; /// Returns a new retrieve for the given terms fn retrieve_for( inp: &Self::Query, query_str: &str, lang: Option, ) -> Retrieve<'index, Self::B, Self::DictItem, Self::Document>; /// Returns a new retrieve for the engine #[inline] fn retrieve( lang: Option, ) -> Retrieve<'index, Self::B, Self::DictItem, Self::Document> { Retrieve::new(Self::get_index(lang)) } } ================================================ FILE: lib/engine/src/pushable/counter.rs ================================================ use std::marker::PhantomData; use super::Pushable; /// Counts all push calls without storing the items pub struct Counter { c: usize, p: PhantomData, } impl Counter { #[inline] pub fn new() -> Self { Self { c: 0, p: PhantomData, } } #[inline] pub fn val(&self) -> usize { self.c } } impl Pushable for Counter { type Item = T; #[inline] fn push(&mut self, _: Self::Item) -> bool { self.c += 1; true } } ================================================ FILE: lib/engine/src/pushable/f_max_cnt.rs ================================================ use super::Pushable; use std::marker::PhantomData; /// A counter that Implements CancelPushable which counts up to a fixed value and /// Cancels counting if this value has been reached pub struct FilteredMaxCounter<'a, T> { val: usize, max: usize, pub filter: Box bool + 'a>, p: PhantomData, } impl<'a, T> FilteredMaxCounter<'a, T> { #[inline] pub fn new(max: usize, filter: F) -> Self where F: Fn(&T) -> bool + 'a, { Self { val: 0, max, filter: Box::new(filter), p: PhantomData, } } #[inline] pub fn val(&self) -> usize { self.val } #[inline] pub fn inc(&mut self, delta: usize) { self.val += delta; } #[inline] pub fn is_full(&self) -> bool { self.val >= self.max } } impl<'a, T> Pushable for FilteredMaxCounter<'a, T> { type Item = T; #[inline] fn push(&mut self, i: Self::Item) -> bool { if self.is_full() { return false; } if !(self.filter)(&i) { self.val += 1; } true } } 
================================================ FILE: lib/engine/src/pushable/max_cnt.rs ================================================ use super::Pushable; use std::marker::PhantomData; /// A counter that Implements CancelPushable which counts up to a fixed value and /// Cancels counting if this value has been reached pub struct MaxCounter { val: usize, max: usize, p: PhantomData, } impl MaxCounter { #[inline] pub fn new(max: usize) -> Self { Self { val: 0, max, p: PhantomData, } } #[inline] pub fn val(&self) -> usize { self.val } #[inline] pub fn inc(&mut self, delta: usize) { self.val += delta; } #[inline] pub fn is_full(&self) -> bool { self.val >= self.max } } impl Pushable for MaxCounter { type Item = T; #[inline] fn push(&mut self, _i: Self::Item) -> bool { if self.is_full() { return false; } self.val += 1; true } } ================================================ FILE: lib/engine/src/pushable/mod.rs ================================================ pub mod counter; pub mod f_max_cnt; pub mod max_cnt; pub mod push_dbg; pub mod push_fn; pub mod push_mod; pub use counter::Counter; pub use f_max_cnt::FilteredMaxCounter; pub use max_cnt::MaxCounter; pub use push_mod::PushMod; use super::relevance::item::RelItem; use priority_container::StableUniquePrioContainerMax; use std::hash::Hash; pub trait Pushable { type Item; fn push(&mut self, i: Self::Item) -> bool; } impl Pushable for StableUniquePrioContainerMax> where T: Eq + Hash + Clone, { type Item = RelItem; #[inline] fn push(&mut self, i: Self::Item) -> bool { self.insert(i); true } } ================================================ FILE: lib/engine/src/pushable/push_dbg.rs ================================================ use super::Pushable; use std::{fmt::Debug, marker::PhantomData}; /// Allows debugging pushed items pub struct PushDbg<'a, P, I> { output: &'a mut P, p: PhantomData, } impl<'a, P, I> PushDbg<'a, P, I> where P: Pushable, I: Debug, { pub fn new(output: &'a mut P) -> Self { Self { output, p: 
PhantomData, } } } impl<'a, P, I> Pushable for PushDbg<'a, P, I> where P: Pushable, I: Debug, { type Item = I; #[inline] fn push(&mut self, i: Self::Item) -> bool { print!("{i:#?}"); let cont = self.output.push(i); println!(" continue: {cont}"); cont } } ================================================ FILE: lib/engine/src/pushable/push_fn.rs ================================================ use super::Pushable; use std::marker::PhantomData; pub struct PushFn { f: F, p: PhantomData, } impl PushFn where F: FnMut(T) -> bool, { #[inline] pub fn new(f: F) -> Self { Self { f, p: PhantomData } } } impl Pushable for PushFn where F: FnMut(T) -> bool, { type Item = T; #[inline] fn push(&mut self, i: Self::Item) -> bool { (self.f)(i) } } ================================================ FILE: lib/engine/src/pushable/push_mod.rs ================================================ use std::marker::PhantomData; use super::Pushable; /// Allows modifying pushed data pub struct PushMod<'a, P, I, O, F> { output: &'a mut P, f: F, p: PhantomData, p2: PhantomData, } impl<'a, P, I, O, F> PushMod<'a, P, I, O, F> where P: Pushable, F: Fn(I) -> O, { pub fn new(output: &'a mut P, f: F) -> Self { Self { output, f, p: PhantomData, p2: PhantomData, } } } impl<'a, P, I, O, F> Pushable for PushMod<'a, P, I, O, F> where F: Fn(I) -> O, P: Pushable, { type Item = I; #[inline] fn push(&mut self, i: Self::Item) -> bool { self.output.push((self.f)(i)) } } ================================================ FILE: lib/engine/src/relevance/data.rs ================================================ use sparse_vec::{SpVec32, VecExt}; use types::jotoba::language::Language; /// Item to sort stuff #[derive(Debug)] pub struct SortData<'item, 'query, T, I, Q> { out_item: &'item T, index_item: &'item I, rel: f32, query_str: &'query str, query: &'query Q, language: Option, threshold: Option, } impl<'item, 'query, T, I, Q> SortData<'item, 'query, T, I, Q> { #[inline] pub fn new( out_item: &'item T, index_item: &'item I, 
rel: f32, query: &'query Q, query_str: &'query str, language: Option, threshold: Option, ) -> Self { Self { out_item, index_item, rel, query_str, query, language, threshold, } } #[inline] pub fn item(&self) -> &T { self.out_item } #[inline] pub fn rel(&self) -> f32 { self.rel } #[inline] pub fn query_str(&self) -> &str { self.query_str } #[inline] pub fn language(&self) -> Option { self.language } #[inline] pub fn query(&self) -> &'query Q { self.query } #[inline] pub fn index_item(&self) -> &I { self.index_item } #[inline] pub fn threshold(&self) -> Option { self.threshold } } impl<'item, 'query, T, I> SortData<'item, 'query, T, I, SpVec32> where I: AsRef, { #[inline] pub fn vec_similarity(&self) -> f32 { self.query.cosine(self.index_item.as_ref()) } } ================================================ FILE: lib/engine/src/relevance/item.rs ================================================ use std::{ cmp::Ordering, hash::{Hash, Hasher}, }; /// A single item (result) in a set of search results #[derive(Clone, Copy, Default, Debug)] pub struct RelItem { pub item: T, pub relevance: f32, } impl RelItem { /// Create a new ResultItem #[inline] pub fn new(item: T, relevance: f32) -> Self { Self { item, relevance } } } impl RelItem { /// Maps the item within the result without changing other data #[inline] pub fn map_item(self, f: F) -> RelItem where F: Fn(T) -> O, { let item = (f)(self.item); RelItem { item, relevance: self.relevance, } } } impl PartialEq for RelItem { #[inline(always)] fn eq(&self, other: &Self) -> bool { self.item == other.item } } impl Eq for RelItem {} impl Hash for RelItem { #[inline] fn hash(&self, state: &mut H) { self.item.hash(state); } } impl PartialOrd for RelItem { #[inline] fn partial_cmp(&self, other: &Self) -> Option { Some(self.relevance.total_cmp(&other.relevance)) } } impl Ord for RelItem { #[inline] fn cmp(&self, other: &Self) -> Ordering { self.relevance.total_cmp(&other.relevance) } } ================================================ 
FILE: lib/engine/src/relevance/mod.rs
================================================
pub mod data;
pub mod item;

use data::SortData;
use types::jotoba::language::Language;

/// Scoring backend for a search: assigns each (output item, index item)
/// pair a relevance score for the given query.
pub trait RelevanceEngine {
    type OutItem;
    type IndexItem;
    type Query;

    /// Called once before scoring starts; default is a no-op.
    fn init(&mut self, _init: RelEngineInit) {}

    fn score<'item, 'query>(
        &self,
        item: &SortData<'item, 'query, Self::OutItem, Self::IndexItem, Self::Query>,
    ) -> f32;
}

/// Data handed to a `RelevanceEngine` on initialization.
pub struct RelEngineInit {
    pub query: String,
    pub language: Option<Language>,
}

impl RelEngineInit {
    #[inline]
    pub(crate) fn new(query: String, language: Option<Language>) -> Self {
        Self { query, language }
    }
}

================================================
FILE: lib/engine/src/result.rs
================================================
use super::relevance::item::RelItem;
use std::{fmt::Debug, slice::Iter};

/// A result from a search. Contains information about the actual
/// amount of items returned and the items to display on the current page.
/// The items are always ordered
// NOTE(review): generic parameter `<T>` reconstructed — stripped by the
// extraction. TODO confirm against upstream.
pub struct SearchResult<T> {
    pub total_items: usize,
    pub items: Vec<RelItem<T>>,
}

impl<T> SearchResult<T> {
    /// Create a new `SearchResult` from a list of items. Requires `items` to be sorted
    #[inline]
    pub fn new(items: Vec<RelItem<T>>, total_items: usize) -> Self {
        Self { items, total_items }
    }

    /// Get the total amount of items in the result. This value is
    /// always bigger or equal to the length of the items in the resultset
    #[inline]
    pub fn len(&self) -> usize {
        self.total_items
    }

    /// Returns `true` if result is empty
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Returns an iterator over the raw result items
    #[inline]
    pub fn iter(&self) -> Iter<'_, RelItem<T>> {
        self.items.iter()
    }

    #[inline]
    pub fn into_inner(self) -> Vec<RelItem<T>> {
        self.items
    }

    /// Returns an iterator over the result items, dropping their relevance
    #[inline]
    pub fn into_iter(self) -> impl Iterator<Item = T> {
        self.items.into_iter().map(|i| i.item)
    }

    /// Returns the item at `index` from the result or None if index is out of bounds
    #[inline]
    pub fn get(&self, index: usize) -> Option<&RelItem<T>> {
        self.items.get(index)
    }
}

impl<T> Default for SearchResult<T> {
    #[inline]
    fn default() -> Self {
        Self {
            total_items: 0,
            items: vec![],
        }
    }
}

impl<T: Debug> Debug for SearchResult<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("SearchResult")
            .field("total_items", &self.total_items)
            .field("items", &self.items)
            .finish()
    }
}

================================================
FILE: lib/engine/src/task.rs
================================================
use crate::{
    pushable::{MaxCounter, PushMod, Pushable},
    relevance::{data::SortData, RelevanceEngine},
    relevance::{item::RelItem, RelEngineInit},
    result::SearchResult,
    Engine,
};
use priority_container::StableUniquePrioContainerMax;
use std::marker::PhantomData;
use types::jotoba::{
    language::Language,
    search::guess::{Guess, GuessType},
};

/// A parameterized search over an `Engine`'s index.
// NOTE(review): the trait-object and generic types below were stripped by
// the extraction and are reconstructed from usage; TODO confirm upstream.
pub struct SearchTask<'index, E: Engine<'index>> {
    /// Search query
    query_str: String,
    /// Language to search in
    query_lang: Option<Language>,
    /// filter out items
    item_filter: Option<Box<dyn Fn(&E::Document) -> bool>>,
    /// Filter out results
    res_filter: Option<Box<dyn Fn(&E::Output) -> bool>>,
    /// Custom result order function
    cust_order: Option<
        Box<dyn RelevanceEngine<OutItem = E::Output, IndexItem = E::Document, Query = E::Query>>,
    >,
    /// Min relevance returned from search algo
    threshold: f32,
    /// Max distance to max item
    max_dist: Option<f32>,
    limit: usize,
    offset: usize,
    est_limit: usize,
    // presumably ties the task to the index lifetime — TODO confirm exact type
    phantom: PhantomData<&'index ()>,
}
impl<'index, E> SearchTask<'index, E> where E: Engine<'index> + 'index, { #[inline] pub fn new>(query: S) -> Self { let mut task = Self::default(); task.query_str = query.as_ref().to_string(); task } /// Creates a new Search task with a query assigned language #[inline] pub fn with_language>(query: S, language: Language) -> Self { let mut task = Self::default(); task.query_str = query.as_ref().to_string(); task.query_lang = Some(language); task } /// Returns `true` if the SearchTask has a language assigned #[inline] pub fn has_language(&self) -> bool { self.query_lang.is_some() } /// Set the total limit. This is the max amount of vectors which will be loaded and processed #[inline] pub fn with_limit(mut self, total_limit: usize) -> Self { self.limit = total_limit; self } /// Sets the max distance a result item can be ratet in order to be a part of the final result set #[inline] pub fn with_max_dist(mut self, max_dist: f32) -> Self { self.max_dist = Some(max_dist); self } /// Sets the search task's threshold. This does not apply on the final score, which can be /// overwritten by `order` but applies to the vector space relevance itself. #[inline] pub fn with_threshold(mut self, threshold: f32) -> Self { self.threshold = threshold; self } /// Returns `true` if there is a threshold set #[inline] pub fn has_threshold(&self) -> bool { self.threshold > 0.0 } /// Sets the offeset of the search. Can be used for pagination. Requires output of search being /// directly used and not manually reordered pub fn with_offset(mut self, offset: usize) -> Self { self.offset = offset; self } /// Set the search task's result filter. 
pub fn with_result_filter(mut self, res_filter: F) -> Self where F: Fn(&E::Output) -> bool, { self.res_filter = Some(Box::new(res_filter)); self } /// Set the search task's custom order function pub fn with_custom_order( mut self, res_filter: impl RelevanceEngine + 'static, ) -> Self { self.cust_order = Some(Box::new(res_filter)); self } /// Set the search task's raw document filter pub fn with_item_filter(mut self, item_filter: F) -> Self where F: Fn(&E::Document) -> bool, { self.item_filter = Some(Box::new(item_filter)); self } /// Runs the search task and returns the result. pub fn find(&mut self) -> SearchResult { self.rel_init(); let cap = self.limit + self.offset; let mut pqueue = StableUniquePrioContainerMax::new_allocated(cap, cap); self.find_to(&mut pqueue); self.make_result(pqueue) } /// Rettrieves results and pushes them into `out` #[inline] pub fn find_to(&mut self, out: &mut O) -> Option where O: Pushable>, { self.rel_init(); self.find_to_inner(out, true) } /// Estimates the amount of results efficiently. This 'guess' is defined as follows: /// /// Be 'm' the amount of items a full search would return. /// Be 'n' the guess returned by this function. /// /// - n = 0 => m = 0 /// - n <= m pub fn estimate_result_count(&mut self) -> Guess { self.rel_init(); let mut counter = MaxCounter::new(self.est_limit + 1); self.estimate_to(&mut counter); let estimated = counter.val(); let mut guess_type = GuessType::Undefined; if (estimated <= self.est_limit) || estimated == 0 { // All filtering operations are applied in estimation algorithm as well. // Since we use the max value of query // result, we can only assure it being accurate if there was only one query and no // Limit was reached. From the 1st condition follows that estimated == 0 implies // an accurate results guess_type = GuessType::Accurate; } else if estimated > self.est_limit { // Were counting 1 more than `est_limit`. 
Thus `estimated` being bigger than limit // means there are more elements than the given limit. However since were returning a // number <= est_limit, relatively to the estimation the guess type is `Opentop` guess_type = GuessType::MoreThan; } let est_result = (estimated).min(self.est_limit) as u32; Guess::new(est_result, guess_type) } /// Estimates result count by pushing elements to `out` #[inline] pub fn estimate_to

(&mut self, out: &mut P) where P: Pushable, { self.rel_init(); let mut out = PushMod::new(out, |i: RelItem| i.item); self.find_to_inner(&mut out, false); } /// Retrieves results and pushes all items into `out`. Calculates relevance for each item if `sort` is true or /// The SearchTask has a threshold set. fn find_to_inner(&self, out: &mut O, sort: bool) -> Option where O: Pushable>, { let query = E::make_query(&self.query_str, self.query_lang)?; let mut retr: E::Retriever = E::retrieve_for(&query, &self.query_str, self.query_lang).get(); let mut pushed = 0; loop { let (index_item, out_items) = match self.retrieve_next(&mut retr) { Some(v) => v, None => break, }; for i in out_items { let score = if sort || self.has_threshold() { self.score(&i, &index_item, &query) } else { 0.0 }; if self.has_threshold() && score < self.threshold { continue; } // Break if caller doesn't want to consume more pushed += 1; if !out.push(RelItem::new(i, score)) { break; } } } Some(pushed) } #[inline] fn score(&self, out_item: &E::Output, index_item: &E::Document, query: &E::Query) -> f32 { let threshold = self.has_threshold().then(|| self.threshold); let s_data = SortData::new( out_item, index_item, 0.0, query, &self.query_str, self.query_lang, threshold, ); self.cust_order .as_ref() .map(|i| i.score(&s_data)) .unwrap_or(0.0) } /// Builds output from the given Prio Queue fn make_result( &self, data: StableUniquePrioContainerMax>, ) -> SearchResult { let total_count = data.total_pushed(); let p_items = self.take_page(data); SearchResult::new(p_items, total_count) } /// Takes the correct page from a UniquePrioContainerMax based on the given offset and limit #[inline] fn take_page(&self, pqueue: StableUniquePrioContainerMax) -> Vec { super::utils::page_from_pqueue(self.limit, self.offset, pqueue) } #[inline] fn retrieve_next(&self, retr: &mut E::Retriever) -> Option<(E::Document, Vec)> { let next = retr.next()?; if !self.item_filter(&next) { return Some((next, vec![])); }; let mut out_items 
= E::doc_to_output(&next).unwrap_or_default(); if out_items.is_empty() { return Some((next, out_items)); } if let Some(ref filter) = self.res_filter { out_items.retain(|i| filter(i)); } Some((next, out_items)) } /// Returns `false` if the item has to be removed from the result #[inline] fn item_filter(&self, item: &E::Document) -> bool { self.item_filter.as_ref().map(|i| i(item)).unwrap_or(true) } #[inline] fn rel_init(&mut self) { if self.cust_order.is_none() { return; } let init = self.make_rel_init(); self.cust_order.as_mut().unwrap().init(init); } #[inline] fn make_rel_init(&self) -> RelEngineInit { RelEngineInit::new(self.query_str.clone(), self.query_lang) } } impl<'a, T: Engine<'a>> Default for SearchTask<'a, T> { #[inline] fn default() -> Self { Self { query_str: Default::default(), query_lang: None, item_filter: None, res_filter: None, cust_order: None, threshold: 0.0, limit: 1000, offset: 0, est_limit: 100, phantom: PhantomData, max_dist: None, } } } ================================================ FILE: lib/engine/src/utils.rs ================================================ use crate::relevance::item::RelItem; use priority_container::StableUniquePrioContainerMax; /// Takes the correct "limit" elements form a from a UniquePrioContainerMax at "offset" pub fn page_from_pqueue( limit: usize, offset: usize, pqueue: StableUniquePrioContainerMax, ) -> Vec { let len = pqueue.len(); let take = (len.saturating_sub(offset)).min(limit); let to_skip = len.saturating_sub(offset + take); let mut o: Vec<_> = pqueue.into_iter().skip(to_skip).take(take).collect(); o.reverse(); o } /// Takes the correct "limit" elements form a from a UniquePrioContainerMax at "offset" pub fn page_from_pqueue_with_max_dist( limit: usize, offset: usize, max_dist: f32, max: f32, pqueue: StableUniquePrioContainerMax>, ) -> Vec> { let peeked = pqueue.peek(); if peeked.is_none() { return vec![]; } let len = pqueue.len(); let take = (len.saturating_sub(offset)).min(limit); let to_skip = 
len.saturating_sub(offset + take); let mut o: Vec<_> = pqueue .into_iter() .filter(|i| i.relevance + max_dist >= max || max_dist == 0.0) .skip(to_skip) .take(take) .collect(); o.reverse(); o } ================================================ FILE: lib/error/Cargo.toml ================================================ [package] name = "error" version = "0.1.0" authors = ["jojii "] edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] actix-web = { version = "4.3.1", optional = true } serde = "1.0.171" strum = "0.25.0" thiserror = "1.0.43" [features] default = [] web_error = ["actix-web"] ================================================ FILE: lib/error/src/api_error.rs ================================================ #![allow(dead_code, unreachable_patterns)] use actix_web::{error::BlockingError, http::StatusCode, HttpResponse, ResponseError}; use serde::Serialize; use thiserror::Error; #[derive(Clone, Copy, PartialEq)] pub enum Origin { Radicals, Suggestions, File, } impl std::fmt::Debug for Origin { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "{}", match self { Origin::Radicals => "radicals", Origin::Suggestions => "suggestions", Origin::File => "file", } ) } } #[derive(Error, Debug, Clone, Copy, PartialEq)] pub enum RestError { #[error("Not found")] NotFound, #[error("Bad request")] BadRequest, #[error("Internal server error")] Internal, #[error("Timeout exceeded")] Timeout, #[error("IO error")] IoError, #[error("Format not supported")] FormatNotSupported, #[error("No text found")] NoTextFound, #[error("missing {0:?}")] Missing(Origin), #[error("Unauthorized")] Unauthorized, } /// Error response format. 
Used as json encoding structure #[derive(Serialize)] struct ErrorResponse { code: u16, error: String, message: String, } impl RestError { pub fn name(&self) -> String { match self { Self::NotFound => "NotFound".to_string(), Self::BadRequest => "BadRequest".to_string(), Self::Internal => "InternalError".to_string(), Self::Timeout => "Timeout".to_string(), Self::IoError => "IoError".to_string(), Self::NoTextFound => "NoTextFound".to_string(), Self::FormatNotSupported => "FormatNotSupported".to_string(), Self::Unauthorized => "Unauthtorized".to_string(), _ => "InternalError".to_string(), } } } /// Implement ResponseError trait. Required for actix web impl ResponseError for RestError { fn status_code(&self) -> StatusCode { match *self { Self::NotFound => StatusCode::NOT_FOUND, Self::BadRequest => StatusCode::BAD_REQUEST, Self::Internal => StatusCode::INTERNAL_SERVER_ERROR, Self::Timeout => StatusCode::REQUEST_TIMEOUT, Self::FormatNotSupported => StatusCode::BAD_REQUEST, Self::NoTextFound => StatusCode::SEE_OTHER, Self::Unauthorized => StatusCode::UNAUTHORIZED, _ => StatusCode::INTERNAL_SERVER_ERROR, } } fn error_response(&self) -> HttpResponse { let status_code = self.status_code(); let error_response = ErrorResponse { code: status_code.as_u16(), message: self.to_string(), error: self.name(), }; HttpResponse::build(status_code).json(error_response) } } impl From for RestError { #[inline] fn from(err: super::Error) -> Self { eprintln!("Error: {:?}", err); match err { crate::Error::NotFound => Self::NotFound, _ => Self::Internal, } } } impl From for RestError { #[inline] fn from(_: std::io::Error) -> Self { Self::IoError } } impl From for RestError { #[inline] fn from(_: BlockingError) -> Self { Self::Internal } } ================================================ FILE: lib/error/src/lib.rs ================================================ #[cfg(feature = "web_error")] pub mod api_error; use std::{fmt::Display, num::ParseIntError, string::FromUtf8Error}; use 
strum::ParseError; #[derive(Debug)] pub enum Error { NotFound, ParseInt(ParseIntError), Utf8Error(FromUtf8Error), Utf8StrError(std::str::Utf8Error), ParseError, Undefined, IoError(std::io::Error), Unexpected, } impl From for Error { fn from(err: std::io::Error) -> Self { Self::IoError(err) } } impl From for Error { fn from(err: FromUtf8Error) -> Self { Self::Utf8Error(err) } } impl From for Error { fn from(err: ParseError) -> Self { match err { ParseError::VariantNotFound => Self::ParseError, } } } impl From for Error { fn from(err: ParseIntError) -> Self { Self::ParseInt(err) } } impl From for Error { fn from(err: std::str::Utf8Error) -> Self { Self::Utf8StrError(err) } } impl Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self) } } impl std::error::Error for Error {} ================================================ FILE: lib/frontend/Cargo.toml ================================================ [package] name = "frontend" version = "0.1.0" authors = ["jojii "] edition = "2021" build = "src/build.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] japanese = { path = "../japanese" } news = { path = "../news"} search = { path = "../search" } error = { path = "../error" } utils = { path = "../utils" } config = { path = "../config" } localization = { path = "../localization" } resources = { path = "../resources"} types = { path = "../types" , features = ["jotoba_intern"]} actix-web = "4.3.1" serde = "1.0.171" sentry = { version = "0.31.5", optional = true } log = "0.4.19" percent-encoding = "2.3.0" itertools = "0.11.0" jp_utils = { git = "https://github.com/JojiiOfficial/jp_utils", features = ["furigana"] } [dev-dependencies] ructe = "0.15.0" [build-dependencies] ructe = "0.15.0" [features] sentry_error = ["sentry"] ================================================ FILE: lib/frontend/src/about.rs ================================================ use 
std::sync::Arc; //use actix_session::Session; use actix_web::{web, HttpRequest, HttpResponse}; use config::Config; use localization::TranslationDict; use crate::{ templates, user_settings, {BaseData, Site}, }; /// About page pub async fn about( locale_dict: web::Data>, config: web::Data, request: HttpRequest, ) -> Result { let settings = user_settings::parse(&request); //session::init(&session, &settings); Ok(HttpResponse::Ok().body( render!( templates::base, BaseData::new(&locale_dict, settings, &config.asset_hash, &config) .with_site(Site::About) ) .render(), )) } ================================================ FILE: lib/frontend/src/actix_ructe.rs ================================================ macro_rules! render { ($template:path) => (super::actix_ructe::Render(|o| $template(o))); ($template:path, $($arg:expr),*) => {{ use super::actix_ructe::Render; Render(|o| $template(o, $($arg),*)) }}; ($template:path, $($arg:expr),* ,) => {{ use super::actix_ructe::Render; Render(|o| $template(o, $($arg),*)) }}; } pub struct Render) -> std::io::Result<()>>(pub T); impl) -> std::io::Result<()>> Render { pub fn render(self) -> Vec { let mut bytes = Vec::new(); self.0(&mut bytes).unwrap(); bytes } } ================================================ FILE: lib/frontend/src/build.rs ================================================ use ructe::{Result, Ructe}; fn main() -> Result<()> { let mut ructe = Ructe::from_env()?; ructe.compile_templates("templates") } ================================================ FILE: lib/frontend/src/direct.rs ================================================ use std::sync::Arc; use actix_web::{web, HttpRequest, HttpResponse}; use config::Config; use localization::TranslationDict; use search::{ query::{Query, UserSettings}, sentence, word::result::AddResData, }; use types::jotoba::{ search::SearchTarget, words::{filter_languages, Word}, }; use crate::{ og_tags::{self, TagKeyName}, search_ep::redirect_home, templates, user_settings, web_error::{self, 
Error}, BaseData, ResultData, SearchResult, }; /// Endpoint to perform a search pub async fn direct_ep( h_query: web::Path<(u8, String)>, locale_dict: web::Data>, config: web::Data, request: HttpRequest, ) -> Result { let settings = user_settings::parse(&request); let (stype, id) = h_query.into_inner(); let query_type = SearchTarget::try_from(stype).map_err(|_| Error::BadRequest)?; let result_data = match query_type { SearchTarget::Words => find_direct_word(&id, &settings).await, SearchTarget::Names => find_direct_name(&id).await, SearchTarget::Sentences => find_direct_sentence(&id, &settings).await, SearchTarget::Kanji => return Ok(redirect_home()), }; if let Err(err) = result_data { return match err { web_error::Error::NotFound => Err(err), _ => Ok(redirect_home()), }; } let query = Query::default(); let mut base_data = BaseData::new(&locale_dict, settings, &config.asset_hash, &config) .with_search_result(&query, result_data.unwrap(), None); set_og_tag(&mut base_data, query_type); Ok(HttpResponse::Ok().body(render!(templates::base, base_data).render())) } fn set_og_tag(base_data: &mut BaseData, query_type: SearchTarget) { let search_result = base_data.site.as_search_result().unwrap(); let mut search_res_og = og_tags::TagSet::with_capacity(5); let title = match query_type { SearchTarget::Kanji => return, SearchTarget::Sentences => "Jotoba sentence".to_string(), SearchTarget::Names => format!("{} - Jotoba name", search_res_val(&search_result).unwrap()), SearchTarget::Words => format!("{} - Jotoba word", search_res_val(&search_result).unwrap()), }; let descrption = "Jotoba entry. 
See more..."; search_res_og.add_og(TagKeyName::Title, &title); search_res_og.add_twitter(TagKeyName::Title, &title); search_res_og.add_og(TagKeyName::Description, descrption); search_res_og.add_twitter(TagKeyName::Description, descrption); search_res_og.add_twitter(TagKeyName::Card, "summary"); base_data.set_og_tags(search_res_og); } fn search_res_val(res: &SearchResult) -> Option { Some(match &res.result { ResultData::Word(w) => w.items[0].get_reading().reading.clone(), ResultData::Name(n) => n[0].kanji.as_ref().unwrap_or(&n[0].kana).to_string(), _ => return None, }) } /// Find direct word pub async fn find_direct_word(id: &str, settings: &UserSettings) -> Result { let sequence_id: u32 = id.parse().map_err(|_| Error::NotFound)?; let res_name = resources::get() .words() .by_sequence(sequence_id) .ok_or(web_error::Error::NotFound)? .clone(); let mut results = vec![res_name]; // also show enlgish if otherwise no results would be shown due users settings let show_english = !results[0].has_language(settings.user_lang) || settings.show_english; filter_languages(results.iter_mut(), (settings.user_lang, show_english)); let word = results.remove(0); Ok(ResultData::Word( search::executor::search_result::SearchResult::::with_other_default( vec![word], 1, ), )) /* Ok(ResultData::Word(WordResult { items, count: 1, contains_kanji, inflection_info: None, sentence_parts: None, sentence_index: 0, searched_query: String::new(), }))*/ } /// Find direct name pub async fn find_direct_name(id: &str) -> Result { let sequence_id: u32 = id.parse().map_err(|_| Error::NotFound)?; let res_word = resources::get() .names() .by_sequence(sequence_id) .ok_or(web_error::Error::NotFound)?; Ok(ResultData::Name(vec![res_word])) } /// Find direct sentence pub async fn find_direct_sentence(id: &str, settings: &UserSettings) -> Result { let sequence_id: u32 = id.parse().map_err(|_| Error::NotFound)?; let res_sentence = resources::get() .sentences() .by_id(sequence_id) 
.ok_or(web_error::Error::NotFound)?; let res_sentence = sentence::result::Sentence::from_m_sentence(res_sentence, (settings.user_lang, true)) .unwrap(); use search::executor::search_result::SearchResult as SearchResult2; Ok(ResultData::Sentence(SearchResult2 { items: vec![res_sentence], total: 1, other_data: sentence::result::ResData::new(false), })) } ================================================ FILE: lib/frontend/src/help_page.rs ================================================ use std::sync::Arc; //use actix_session::Session; use actix_web::{web, HttpRequest, HttpResponse}; use config::Config; use localization::TranslationDict; use crate::{ templates, user_settings, {BaseData, Site}, }; /// About page pub async fn help( locale_dict: web::Data>, request: HttpRequest, config: web::Data, ) -> Result { let settings = user_settings::parse(&request); //session::init(&session, &settings); Ok(HttpResponse::Ok().body( render!( templates::base, BaseData::new(&locale_dict, settings, &config.asset_hash, &config) .with_site(Site::InfoPage) ) .render(), )) } ================================================ FILE: lib/frontend/src/index.rs ================================================ use std::sync::Arc; //use actix_session::Session; use actix_web::{web, HttpRequest, HttpResponse}; use config::Config; use localization::TranslationDict; use crate::{ templates, user_settings, {BaseData, Site}, }; /// Homepage pub async fn index( locale_dict: web::Data>, request: HttpRequest, config: web::Data, ) -> Result { let settings = user_settings::parse(&request); //session::init(&session, &settings); Ok(HttpResponse::Ok().body( render!( templates::base_index, BaseData::new(&locale_dict, settings, &config.asset_hash, &config) .with_site(Site::Index) ) .render(), )) } ================================================ FILE: lib/frontend/src/lib.rs ================================================ include!(concat!(env!("OUT_DIR"), "/templates.rs")); #[macro_use] mod actix_ructe; pub mod 
about; pub mod direct; pub mod help_page; pub mod index; pub mod liveness; pub mod news_ep; pub mod og_tags; pub mod search_ep; //pub mod search_help; mod session; pub mod templ_utils; pub mod unescaped; mod url_query; pub mod user_settings; pub mod web_error; use std::fmt::Display; use config::Config; use localization::{ language::Language, traits::{Translatable, TranslatablePlural}, TranslationDict, }; use news::NewsEntry; use og_tags::TagKeyName; use search::{executor::search_result::SearchResult as SearchResult2, query::Query}; use search::{kanji::result::Item as KanjiItem, query::UserSettings}; use types::jotoba::{ names::Name, pagination::Pagination, search::{help::SearchHelp, SearchTarget}, words::Word, }; use unescaped::{UnescapedStr, UnescapedString}; /// Data for the base template pub struct BaseData<'a> { pub site: Site<'a>, pub dict: &'a TranslationDict, pub user_settings: UserSettings, pub pagination: Option, pub asset_hash: &'a str, pub config: &'a Config, pub og_tags: Option, } /// The site to display #[derive(Clone)] pub enum Site<'a> { SearchResult(SearchResult<'a>), Index, About, InfoPage, News(Vec), } /// Search result data. 
Required by individual templates to render the result items #[derive(Clone, Debug)] pub struct SearchResult<'a> { pub query: &'a Query, pub result: ResultData, pub search_help: Option, } /// The particular search result items #[derive(Clone, Debug)] pub enum ResultData { Word(SearchResult2), KanjiInfo(Vec), Name(Vec<&'static Name>), Sentence(SearchResult2), } impl<'a> BaseData<'a> { #[inline] pub fn new( dict: &'a TranslationDict, user_settings: UserSettings, asset_hash: &'a str, config: &'a Config, ) -> Self { Self { site: Site::Index, dict, user_settings, pagination: None, asset_hash, config, og_tags: None, } } #[inline] pub fn with_site(mut self, site: Site<'a>) -> Self { self.site = site; self } #[inline] pub fn with_cust_pages( &mut self, items: u32, curr_page: u32, items_per_page: u32, max_pages: u32, ) { let mut pagination = Pagination { items, curr_page, items_per_page, max_pages, }; // Don't show paginator if there is only one or no page if pagination.get_last() <= 1 { return; } if curr_page > pagination.get_last() { pagination.curr_page = pagination.get_last(); } self.pagination = Some(pagination); } #[inline] pub fn with_pages(&mut self, items: u32, curr_page: u32) { self.with_cust_pages(items, curr_page, self.user_settings.page_size, 100); } #[inline] pub fn get_search_help(&self) -> Option<&SearchHelp> { let help = self.site.as_search_result()?.search_help.as_ref()?; (!help.is_empty()).then(|| help) } #[inline] pub fn get_search_site_id(&self) -> u8 { if let Site::SearchResult(ref res) = self.site { return match res.result { ResultData::Word(_) => 0, ResultData::KanjiInfo(_) => 1, ResultData::Sentence(_) => 2, ResultData::Name(_) => 3, }; } 0 } #[inline] pub fn get_search_site_name(&self) -> &str { if let Site::SearchResult(ref res) = self.site { return match res.result { ResultData::Word(_) => self.gettext("Words").as_str(), ResultData::KanjiInfo(_) => self.gettext("Kanji").as_str(), ResultData::Sentence(_) => self.gettext("Sentences").as_str(), 
ResultData::Name(_) => self.gettext("Names").as_str(), }; } self.gettext("Words").as_str() } #[inline] pub fn with_search_result( self, query: &'a Query, result: ResultData, search_help: Option, ) -> Self { let search_result = SearchResult { query, result, search_help, }; self.with_site(Site::SearchResult(search_result)) } /// Gets an owned String of the query pub fn get_query_str(&self) -> String { let query = match &self.site { Site::SearchResult(search_result) => { Some(search_result.query.without_search_type_tags()) } _ => None, } .unwrap_or_default(); query } /// Return a string 'selected' if the query_type in qs is equal to i pub fn sel_str(&self, i: SearchTarget) -> &'static str { let is_selected = match &self.site { Site::SearchResult(search_result) => search_result.query.target == i, _ => false, }; if is_selected { "selected" } else { "" } } /// Returns true if the kanji compounds should be collapsed by default pub fn kanji_copounds_collapsed(&self) -> bool { self.pagination.as_ref().map(|i| i.get_last()).unwrap_or(0) > 1 } /// Sets og tags which will overwrite the site-defaults if existing pub fn set_og_tags(&mut self, tags: og_tags::TagSet) { self.og_tags = Some(tags); } /// returns OG Tags pub fn get_og_tags(&self) -> Option { if let Some(override_tags) = &self.og_tags { return Some(override_tags.clone()); } self.site.og_tags() } #[inline] pub fn assets_path(&self) -> &str { self.config.server.get_html_files() } } impl<'a> Site<'a> { #[inline] pub fn as_search_result(&self) -> Option<&SearchResult<'a>> { if let Self::SearchResult(v) = self { Some(v) } else { None } } /// Returns proper OG tags for the current site pub fn og_tags(&self) -> Option { Some(match self { Site::SearchResult(rs) => rs.og_tags(), _ => default_og_tags(), }) } } impl ResultData { /// Returns `true` if the ResultData does not contain any items #[inline] pub fn is_empty(&self) -> bool { match self { ResultData::Word(w) => w.items.is_empty() && w.sentence.is_none(), 
ResultData::KanjiInfo(k) => k.is_empty(), ResultData::Name(n) => n.is_empty(), ResultData::Sentence(s) => s.items.is_empty(), } } } impl<'a> SearchResult<'a> { pub fn og_tags(&self) -> og_tags::TagSet { let mut tags = og_tags::TagSet::with_capacity(5); let search_type_name = self.search_type_ogg(); let query = &self.query.query_str; let title = format!("Jotoba {search_type_name} search result for '{query}'"); let description = self.og_tag_description(); tags.add_og(TagKeyName::Title, &title); tags.add_og(TagKeyName::Description, &description); tags.add_twitter(TagKeyName::Title, &title); tags.add_twitter(TagKeyName::Description, &description); tags.add_twitter(TagKeyName::Card, "summary"); tags } pub(crate) fn og_tag_description(&self) -> String { format!("{} results. See more...", self.result_count()) } pub(crate) fn search_type_ogg(&self) -> &'static str { match self.result { ResultData::Word(_) => "words", ResultData::KanjiInfo(_) => "kanji", ResultData::Sentence(_) => "sentences", ResultData::Name(_) => "names", } } fn result_count(&self) -> usize { match &self.result { ResultData::Word(w) => w.items.len(), ResultData::KanjiInfo(k) => k.len(), ResultData::Name(n) => n.len(), ResultData::Sentence(s) => s.items.len(), } } } fn default_og_tags() -> og_tags::TagSet { let mut tags = og_tags::TagSet::new(); let description = "A powerful and free Japanese dictionary supporting words, kanji, sentences, and many different languages."; tags.add_og(TagKeyName::Title, "Jotoba"); tags.add_og(TagKeyName::Description, description); tags.add_og(TagKeyName::URL, "https://jotoba.de"); tags.add_twitter(TagKeyName::Title, "Jotoba"); tags.add_twitter(TagKeyName::Description, description); tags } /// Translation helper impl<'a> BaseData<'a> { #[inline] pub fn get_lang(&self) -> Language { self.user_settings.page_lang } #[inline] pub fn gettext(&self, t: T) -> UnescapedStr<'a> { t.gettext(&self.dict, Some(self.get_lang())).into() } #[inline] pub fn gettext_custom(&self, t: T) -> 
UnescapedString { t.gettext_custom(&self.dict, Some(self.get_lang())).into() } #[inline] pub fn pgettext(&self, t: T, context: &'a str) -> UnescapedStr<'a> { t.pgettext(&self.dict, context, Some(self.get_lang())) .into() } #[inline] pub fn ngettext(&self, t: T, n: u64) -> UnescapedStr<'a> { t.ngettext(&self.dict, n, Some(self.get_lang())).into() } #[inline] pub fn pngettext( &self, t: T, context: &'a str, n: u64, ) -> UnescapedStr<'a> { t.npgettext(&self.dict, context, n, Some(self.get_lang())) .into() } // Format functions #[inline] pub fn gettext_fmt( &self, t: T, values: &[V], ) -> UnescapedString { t.gettext_fmt(&self.dict, values, Some(self.get_lang())) .into() } #[inline] pub fn pgettext_fmt( &self, t: T, context: &'a str, values: &[V], ) -> UnescapedString { t.pgettext_fmt(&self.dict, context, values, Some(self.get_lang())) .into() } #[inline] pub fn ngettext_fmt( &self, t: T, n: u64, values: &[V], ) -> UnescapedString { t.ngettext_fmt(&self.dict, n, values, Some(self.get_lang())) .into() } #[inline] pub fn pngettext_fmt( &self, t: T, context: &'a str, n: u64, values: &[V], ) -> UnescapedString { t.npgettext_fmt(&self.dict, context, n, values, Some(self.get_lang())) .into() } #[inline] pub fn gt_search_link( &self, t: T, value: V, ) -> UnescapedString { let link = format_search_link(value); t.gettext_fmt(&self.dict, &[link], Some(self.get_lang())) .into() } #[inline] pub fn gt_search_links( &self, t: T, link: usize, values: &[V], ) -> UnescapedString { let mut values = values.iter().map(|i| i.to_string()).collect::>(); values[link] = format_search_link(&values[link]); t.gettext_fmt(&self.dict, &values, Some(self.get_lang())) .into() } #[inline] pub fn ngt_search_links( &self, t: T, link: usize, values: &[V], n: u64, ) -> UnescapedString { let mut values = values.iter().map(|i| i.to_string()).collect::>(); values[link] = format_search_link(&values[link]); t.ngettext_fmt(&self.dict, n, &values, Some(self.get_lang())) .into() } } fn format_search_link(input: V) 
-> String { format!( "{}", input, input ) } ================================================ FILE: lib/frontend/src/liveness.rs ================================================ use actix_web::HttpResponse; pub async fn ready() -> HttpResponse { HttpResponse::Ok().finish() } pub async fn healthy() -> HttpResponse { HttpResponse::Ok().finish() } ================================================ FILE: lib/frontend/src/news_ep.rs ================================================ use std::sync::Arc; //use actix_session::Session; use actix_web::{web, HttpRequest, HttpResponse}; use config::Config; use localization::TranslationDict; use crate::{ templates, user_settings, {BaseData, Site}, }; /// News page pub async fn news( locale_dict: web::Data>, config: web::Data, request: HttpRequest, ) -> Result { let settings = user_settings::parse(&request); //session::init(&session, &settings); let news = news::get().last_entries(5).cloned().collect::>(); Ok(HttpResponse::Ok().body( render!( templates::base, BaseData::new(&locale_dict, settings, &config.asset_hash, &config) .with_site(Site::News(news)) ) .render(), )) } ================================================ FILE: lib/frontend/src/og_tags.rs ================================================ use crate::unescaped::UnescapedString; use itertools::Itertools; /// Set of tags which can be rendered as HTML #[derive(Clone)] pub struct TagSet { tags: Vec, } #[derive(Clone, PartialEq)] pub struct Tag { pub key: TagKey, pub value: String, } #[derive(Clone, Copy, PartialEq)] pub enum TagKey { Og(TagKeyName), Twitter(TagKeyName), } #[derive(Clone, Copy, PartialEq)] pub enum TagKeyName { Title, Type, Description, URL, Card, } impl TagSet { /// Creates a new empty tag set #[inline] pub(crate) fn new() -> Self { TagSet { tags: vec![] } } /// Creates a new empty tag set with n capacity #[inline] pub(crate) fn with_capacity(cap: usize) -> Self { TagSet { tags: Vec::with_capacity(cap), } } /// Adds a new og tag to the `TagSet` #[inline] pub 
fn add_og>(&mut self, key: TagKeyName, value: S) { let key = TagKey::Og(key); self.add(Tag::new(key, value)) } /// Adds a new twitter tag to the `TagSet` #[inline] pub fn add_twitter>(&mut self, key: TagKeyName, value: S) { let key = TagKey::Twitter(key); self.add(Tag::new(key, value)) } /// Adds a tag to the `TagSet` #[inline] pub fn add(&mut self, tag: Tag) { self.tags.push(tag); } /// Sets the value of an og tag. Returns `None` if no og tag with `key` found #[inline] pub fn set_og_tag>(&mut self, key: TagKeyName, value: S) -> Option<()> { self.set_tag(TagKey::Og(key), value) } /// Sets the value of a twitter tag. Returns `None` if no twitter tag with `key` found #[inline] pub fn set_twitter_tag>(&mut self, key: TagKeyName, value: S) -> Option<()> { self.set_tag(TagKey::Twitter(key), value) } /// Sets the value of a tag. Returns `None` if no tag with `key` found #[inline] pub fn set_tag>(&mut self, key: TagKey, value: S) -> Option<()> { self.tags.iter_mut().find(|i| i.key == key)?.value = value.as_ref().to_string(); Some(()) } /// Render the `TagSet` #[inline] pub fn render(&self) -> String { self.tags.iter().map(|i| i.render()).join("\n\t") } /// Render the `TagSet` unescaped (for use in HTML) #[inline] pub fn render_unescaped(&self) -> UnescapedString { self.render().into() } } impl Tag { /// Creates a new tag pub fn new>(key: TagKey, value: S) -> Self { let value = value.as_ref().trim().to_string(); Self { key, value } } /// Renders a single tag to HTML #[inline] pub fn render(&self) -> String { let key_attr = match self.key { TagKey::Og(og) => format!("property=\"og:{}\"", og.as_ref()), TagKey::Twitter(twitter) => format!("property=\"twitter:{}\"", twitter.as_ref()), }; format!("", self.value) } } impl TagKey { /// Create a new og tag key #[inline] pub fn new_og(tag_name: TagKeyName) -> Self { TagKey::Og(tag_name) } /// Create a new twitter key #[inline] pub fn new_twitter(tag_name: TagKeyName) -> Self { TagKey::Og(tag_name) } } impl AsRef for TagKeyName { 
#[inline] fn as_ref(&self) -> &str { match self { TagKeyName::Title => "title", TagKeyName::Type => "type", TagKeyName::Description => "description", TagKeyName::URL => "url", TagKeyName::Card => "card", } } } ================================================ FILE: lib/frontend/src/search_ep.rs ================================================ use super::user_settings; use super::web_error; use crate::{ templates, url_query::{NoJSQueryStruct, QueryStruct}, BaseData, ResultData, }; use actix_web::{web, HttpRequest, HttpResponse}; use config::Config; use localization::TranslationDict; use percent_encoding::percent_decode; use search::SearchExecutor; use search::{ self, query::{Query, UserSettings}, }; use std::{sync::Arc, time::Instant}; use types::jotoba::search::help::SearchHelp; use types::jotoba::search::SearchTarget; /// Endpoint to perform a search pub async fn search_ep_no_js( query_data: web::Query, locale_dict: web::Data>, config: web::Data, request: HttpRequest, ) -> Result { let (query_data, query) = query_data.0.to_query_struct(); search(query, query_data, locale_dict, config, request).await } /// Endpoint to perform a search pub async fn search_ep( query: web::Path, query_data: web::Query, locale_dict: web::Data>, config: web::Data, request: HttpRequest, ) -> Result { let query = percent_decode(query.as_bytes()).decode_utf8()?.to_string(); search(query, query_data.0, locale_dict, config, request).await } async fn search( query: String, query_data: QueryStruct, locale_dict: web::Data>, config: web::Data, request: HttpRequest, ) -> Result { let settings = user_settings::parse(&request); // Parse query and redirect to home on error let query = match query_data .adjust(query.to_string()) .as_query_parser(settings) .parse() { Some(k) => k, None => return Ok(redirect_home()), }; let start = Instant::now(); // Log search duration if too long and available let search_result = do_search(query.target, &locale_dict, settings, &query, &config).await?; log::debug!( 
"{:?} search for {:?} took {:?}", query.target, query.query_str, start.elapsed() ); Ok(HttpResponse::Ok().body(render!(templates::base, search_result).render())) } /// Run the search and return the `BaseData` for the result page to render async fn do_search<'a>( querytype: SearchTarget, locale_dict: &'a TranslationDict, settings: UserSettings, query: &'a Query, config: &'a Config, ) -> Result, web_error::Error> { let mut base_data = BaseData::new(locale_dict, settings, &config.asset_hash, &config); let result_data = match querytype { SearchTarget::Kanji => kanji_search(&mut base_data, &query).await, SearchTarget::Sentences => sentence_search(&mut base_data, &query).await, SearchTarget::Names => name_search(&mut base_data, &query).await, SearchTarget::Words => word_search(&mut base_data, &query).await, }?; let mut search_help: Option = None; if result_data.is_empty() { let query = query.to_owned(); search_help = web::block(move || search::build_help(querytype, &query)).await?; } Ok(base_data.with_search_result(query, result_data, search_help)) } type SResult = Result; /// Perform a sentence search async fn sentence_search<'a>(base_data: &mut BaseData<'a>, query: &'a Query) -> SResult { let q = query.to_owned(); //let result = web::block(move || search::sentence::Search::new(&q).search()).await??; let result = web::block(move || { let s = search::sentence::Search::new(&q); search::SearchExecutor::new(s).run() }) .await?; base_data.with_pages(result.total as u32, query.page as u32); Ok(ResultData::Sentence(result)) } /// Perform a kanji search async fn kanji_search<'a>(base_data: &mut BaseData<'a>, query: &'a Query) -> SResult { let q = query.to_owned(); let result = web::block(move || search::kanji::search(&q)).await??; base_data.with_cust_pages( result.total_len as u32, query.page as u32, query.settings.page_size, 400, ); Ok(ResultData::KanjiInfo(result.items)) } /// Perform a name search async fn name_search<'a>(base_data: &mut BaseData<'a>, query: &'a Query) -> 
SResult { let q = query.to_owned(); let result = web::block(move || { let search = search::name::Search::new(&q); SearchExecutor::new(search).run() }) .await?; base_data.with_pages(result.total as u32, query.page as u32); Ok(ResultData::Name(result.items)) } /// Perform a word search async fn word_search<'a>(base_data: &mut BaseData<'a>, query: &'a Query) -> SResult { let q = query.to_owned(); let result = web::block(move || { let search = search::word::Search::new(&q); SearchExecutor::new(search).run() }) .await?; base_data.with_pages(result.total as u32, query.page as u32); Ok(ResultData::Word(result)) } pub(crate) fn redirect_home() -> HttpResponse { HttpResponse::MovedPermanently() .append_header(("Location", "/")) .finish() } /// Reports a search timeout to sentry #[cfg(feature = "sentry_error")] fn report_timeout(request: &HttpRequest, query: &Query) { use sentry::{protocol::Event, Level}; let msg = format!("{:?}-search \"{}\" timed out", query.type_, query.query); sentry::capture_event(Event { request: Some(sentry_request_from_http(request)), level: Level::Error, message: Some(msg), ..Default::default() }); } /// Build a Sentry request struct from the HTTP request #[cfg(feature = "sentry_error")] fn sentry_request_from_http(request: &HttpRequest) -> sentry::protocol::Request { use sentry::protocol::Request; let sentry_req = Request { url: format!( "{}://{}{}", request.connection_info().scheme(), request.connection_info().host(), request.uri() ) .parse() .ok(), method: Some(request.method().to_string()), headers: request .headers() .iter() .map(|(k, v)| (k.to_string(), v.to_str().unwrap_or_default().to_string())) .collect(), ..Default::default() }; sentry_req } #[cfg(feature = "sentry_error")] fn log_duration(search_type: SearchTarget, duration: Duration) { sentry::capture_message( format!("{:?}-search took: {:?}", search_type, duration).as_str(), sentry::Level::Warning, ); } ================================================ FILE: 
lib/frontend/src/search_help.rs ================================================ use types::jotoba::{ language::Language as ResLanguage, search::{guess::Guess, QueryType}, }; /// Structure containing information for better search help in case no item was /// found in a search #[derive(Clone, Default, Debug)] pub struct SearchHelp { pub words: Option, pub names: Option, pub sentences: Option, pub kanji: Option, pub other_langs: Vec, } impl SearchHelp { /// Returns `true` if `SearchHelp` is not helpful at all (empty) pub fn is_empty(&self) -> bool { self.iter_items().next().is_none() } /// Returns an iterator over all (QueryType, Guess) pairs that have a value pub fn iter_items(&self) -> impl Iterator { let types = &[ (self.words, QueryType::Words), (self.names, QueryType::Names), (self.sentences, QueryType::Sentences), (self.kanji, QueryType::Kanji), ]; types .iter() .filter_map(|i| i.0.is_some().then(|| (i.1, i.0.unwrap()))) .filter(|i| i.1.value != 0) .collect::>() .into_iter() } pub fn iter_langs(&self) -> impl Iterator + '_ { self.other_langs .iter() .map(|lang| (*lang, lang.to_query_format())) } } ================================================ FILE: lib/frontend/src/session.rs ================================================ /* use actix_session::Session; use search::query::UserSettings; // Initializes the session. Returns a session id if user didn't opt out pub(super) fn init(session: &Session, settings: &UserSettings) -> Option { None // User opted out if !settings.cookies_enabled { session.purge(); return None; } // Reads or generates a new session id let session_id = match session.get::("id").ok()? 
{ Some(v) => v, None => { let new_id = utils::rand_alpha_numeric(30); session.set("id", new_id.clone()).ok()?; new_id } }; Some(session_id) } */ ================================================ FILE: lib/frontend/src/templ_utils.rs ================================================ use itertools::Itertools; use jp_utils::furi::{parse::FuriParser, segment::SegmentRef}; use localization::{traits::Translatable, TranslationDict}; use search::executor::search_result::SearchResult; use types::jotoba::{ kanji::Kanji, language::{param::AsLangParam, Language}, names::Name, words::{filter_languages, sense::Sense, Word}, }; use crate::unescaped::UnescapedString; /// Returns a list of all collocations of a word pub fn get_collocations(word: &Word, lang: impl AsLangParam) -> Vec<(String, String)> { if !word.has_collocations() { return vec![]; } let word_storage = resources::get().words(); let mut words = word .collocations .as_ref() .unwrap() .iter() .filter_map(|i| word_storage.by_sequence(*i)) .cloned() .collect::>(); filter_languages(words.iter_mut(), lang); words .into_iter() .map(|word| { let senses: Vec = word .get_senses_with_en() .into_iter() .flatten() .take(5) .map(|i| i.glosses) .flatten() .map(|i| i.gloss) .collect(); let reading = word.reading.kanji.unwrap_or(word.reading.kana).reading; (reading, senses.join(", ")) }) .collect() } #[inline] pub fn unescaped_string(s: T) -> UnescapedString { UnescapedString::new(s) } /// Returns the transive version of `word` #[inline] pub fn get_transitive_counterpart(word: &Word) -> Option { let seq_id = word.transive_version.as_ref()?.get(); resources::get().words().by_sequence(seq_id).cloned() } /// Returns the intransive version of `word` #[inline] pub fn get_intransitive_counterpart(word: &Word) -> Option { let seq_id = word.intransive_version.as_ref()?.get(); resources::get().words().by_sequence(seq_id).cloned() } /// Returns an example sentences of a `sense` if existing. 
/// tries to use a sentence written in `language` or falls back to english pub fn ext_sentence( sense: &Sense, language: &Language, ) -> Option<(Vec>, &'static str)> { let sentence = resources::get() .sentences() .by_id(sense.example_sentence?)?; let translation = sentence .translation_for(*language) .or_else(|| sentence.translation_for(Language::English))?; // let furigana = furigana::parse::unchecked(&sentence.furigana); // We check furigana at preprocessing so we can unwrap here. let furigana = FuriParser::new(&sentence.furigana).to_vec().unwrap(); Some((furigana, translation)) } pub fn get_types_humanized( name: &Name, dict: &TranslationDict, lang: localization::language::Language, ) -> String { if let Some(ref n_types) = name.name_type { n_types .iter() .filter_map(|i| (!i.is_gender()).then(|| i.pgettext(dict, "name_type", Some(lang)))) .join(", ") } else { String::from("") } } pub fn word_kanji(res: &SearchResult) -> Vec { search::word::kanji::load_word_kanji_info(&res.items) } pub fn has_kanji(res: &SearchResult) -> bool { !word_kanji(res).is_empty() } ================================================ FILE: lib/frontend/src/unescaped.rs ================================================ use std::{ fmt::Display, io::{self, Write}, }; use crate::templates::ToHtml; /// Unescaped owned String pub type UnescapedString = Unescaped; /// Unescaped &str pub type UnescapedStr<'a> = Unescaped<&'a str>; /// Write something unescaped pub struct Unescaped(T); impl ToHtml for Unescaped { #[inline] fn to_html(&self, out: &mut dyn Write) -> io::Result<()> { write!(out, "{}", self.0) } } impl<'a> ToHtml for Unescaped<&'a str> { #[inline] fn to_html(&self, out: &mut dyn Write) -> io::Result<()> { write!(out, "{}", self.0) } } impl<'a> From<&'a str> for UnescapedStr<'a> { #[inline] fn from(s: &'a str) -> Self { Unescaped(s) } } impl From for UnescapedString { #[inline] fn from(s: String) -> Self { Unescaped(s) } } impl Unescaped { #[inline] pub fn new(t: T) -> Self { 
Unescaped(t.to_string()) } } impl<'a> Unescaped<&'a str> { #[inline] pub fn new(t: &'a str) -> Self { Unescaped(t) } } impl ToString for Unescaped { #[inline] fn to_string(&self) -> String { self.0.to_string() } } impl Into for Unescaped { #[inline] fn into(self) -> String { (&self).into() } } impl Into for &Unescaped { #[inline] fn into(self) -> String { format!("{}", self.0) } } impl + Display> AsRef for Unescaped { #[inline] fn as_ref(&self) -> &str { self.0.as_ref() } } impl<'a> Unescaped<&'a str> { /// Returns a string reference of the unescaped value #[inline] pub fn as_str(&self) -> &'a str { self.0.as_ref() } } impl Unescaped { /// Returns a string reference of the unescaped value #[inline] pub fn as_str(&self) -> &str { self.0.as_ref() } } ================================================ FILE: lib/frontend/src/url_query.rs ================================================ use std::str::FromStr; use search::{ self, query::{parser::QueryParser, UserSettings}, }; use serde::{Deserialize, Deserializer}; use types::jotoba::{language::Language, search::SearchTarget}; #[derive(Deserialize)] pub struct QueryStruct { #[serde(rename = "t")] pub search_type: Option, #[serde(rename = "i")] pub word_index: Option, #[serde(rename = "p", default = "default_page")] pub page: usize, #[serde(default, rename = "l", deserialize_with = "deserialize_lang")] pub lang_overwrite: Option, #[serde(skip)] pub query_str: String, } impl QueryStruct { /// Adjusts the search query trim and map empty search queries to Option::None. 
/// Ensures `search_type` is always 'Some()' pub fn adjust(&self, query_str: String) -> Self { let query_str = query_str.trim().to_string(); let page = if self.page == 0 { default_page() } else { self.page }; QueryStruct { query_str, search_type: Some(self.search_type.unwrap_or_default()), page, word_index: self.word_index, lang_overwrite: self.lang_overwrite, } } /// Returns a [`QueryParser`] of the query #[inline] pub fn as_query_parser(&self, user_settings: UserSettings) -> QueryParser { let mut q_parser = QueryParser::new( self.query_str.clone(), self.search_type.unwrap_or_default(), user_settings, ) .with_page(self.page) .with_word_index(self.word_index.unwrap_or_default()); if let Some(lang) = self.lang_overwrite { q_parser = q_parser.with_lang_overwrite(lang); } q_parser } } #[inline] fn default_page() -> usize { 1 } /// Query format for js fallback queries of the format http://127.0.0.1:8080/search?t=0&s=world /// instead of the query being an url parameter #[derive(Deserialize)] pub struct NoJSQueryStruct { #[serde(rename = "s")] pub query: String, #[serde(rename = "t")] pub search_type: Option, #[serde(rename = "i")] pub word_index: Option, #[serde(rename = "p", default = "default_page")] pub page: usize, #[serde(default, rename = "l", deserialize_with = "deserialize_lang")] pub lang_overwrite: Option, } impl NoJSQueryStruct { /// Converts a NoJSQueryStruct into a QueryStruct and the query string pub(crate) fn to_query_struct(self) -> (QueryStruct, String) { let query_struct = QueryStruct { page: self.page, word_index: self.word_index, search_type: self.search_type, query_str: String::new(), lang_overwrite: self.lang_overwrite, }; (query_struct, self.query) } } /// Deserializes a field into a Option. 
None if invalid lang-str or Deserializing str /// failed fn deserialize_lang<'de, D>(s: D) -> Result, D::Error> where D: Deserializer<'de>, { return Ok(Language::from_str(&String::deserialize(s)?).ok()); } ================================================ FILE: lib/frontend/src/user_settings.rs ================================================ use std::str::FromStr; use actix_web::HttpRequest; use search::query::UserSettings; use types::jotoba::language::Language; /// Parses user settings from a `HttpRequest` pub(super) fn parse(request: &HttpRequest) -> UserSettings { let show_english = request .cookie("show_english") .and_then(|i| i.value().parse().ok()) .unwrap_or_else(|| UserSettings::default().show_english); let user_lang = request .cookie("default_lang") .and_then(|i| Language::from_str(i.value()).ok()) .unwrap_or_default(); let page_lang = request .cookie("page_lang") .and_then(|i| localization::language::Language::from_str(i.value()).ok()) .unwrap_or_default(); let english_on_top = request .cookie("show_english_on_top") .and_then(|i| i.value().parse().ok()) .unwrap_or_else(|| UserSettings::default().english_on_top) && show_english; let items_per_page = request .cookie("items_per_page") .and_then(|i| i.value().parse().ok()) .unwrap_or_else(|| UserSettings::default().page_size); let example_sentences_enabled = request .cookie("show_sentences") .and_then(|i| Some(i.value() == "true")) .unwrap_or_else(|| UserSettings::default().show_example_sentences); let sentence_furigana = request .cookie("sentence_furigana") .and_then(|i| Some(i.value() == "true")) .unwrap_or_else(|| UserSettings::default().sentence_furigana); UserSettings { user_lang, show_english, english_on_top, page_lang, page_size: items_per_page, show_example_sentences: example_sentences_enabled, sentence_furigana, ..Default::default() } } ================================================ FILE: lib/frontend/src/web_error.rs ================================================ use 
actix_web::{error::BlockingError, http::StatusCode, HttpResponse, ResponseError}; #[cfg(not(feature = "sentry_error"))] use log::error; use crate::templates; #[derive(Debug)] pub enum Error { Internal, NotFound, SearchTimeout, BadRequest, } impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self) } } /// Informatin to print on the error page pub struct InfoText { pub primary: &'static str, pub secondary: &'static str, } // Treat all crate::error::Error as Internal error impl From for Error { fn from(err: error::Error) -> Self { #[cfg(feature = "sentry_error")] sentry::capture_error(&err); #[cfg(not(feature = "sentry_error"))] error!("{}", err); Self::Internal } } impl ResponseError for Error { fn status_code(&self) -> StatusCode { match self { Error::Internal => StatusCode::INTERNAL_SERVER_ERROR, Error::NotFound => StatusCode::NOT_FOUND, Error::SearchTimeout => StatusCode::REQUEST_TIMEOUT, Error::BadRequest => StatusCode::BAD_REQUEST, } } fn error_response(&self) -> HttpResponse { // Render the error template HttpResponse::Ok().body( render!( templates::error_page, self.status_code().as_u16(), self.get_info_text() ) .render(), ) } } impl From for Error { fn from(_: std::str::Utf8Error) -> Self { Self::BadRequest } } impl From for Error { #[inline] fn from(_: BlockingError) -> Self { Self::Internal } } impl Error { /// Return an [`InfoText`] based on the error suitable for displaying on the error site fn get_info_text(&self) -> InfoText { let (primary, secondary) = { match self { Error::Internal => ("Sorry", "try again later"), Error::NotFound => ("The page", "was not found"), Error::SearchTimeout => ("Search", "timed out"), Error::BadRequest => ("Bad request", ""), } }; InfoText { primary, secondary } } } /// Not found error handler pub async fn not_found() -> Result { Err(Error::NotFound) } ================================================ FILE: lib/frontend/templates/base.rs.html 
================================================ @use super::subtemplates::{head_html, input_dropdown_html, main_body_html, footer_html}; @use super::overlays::{page_overlays_html, search_overlays_html, mobile_overlays_html}; @use crate::BaseData; @(data: BaseData) @:head_html(&data)

home
@:input_dropdown_html(&data)
@:search_overlays_html(&data)
@:main_body_html(&data) @:page_overlays_html(&data) @:mobile_overlays_html(&data)
@:footer_html(&data, false) ================================================ FILE: lib/frontend/templates/base_index.rs.html ================================================ @use super::subtemplates::{head_html, input_dropdown_html, footer_html}; @use super::overlays::{page_overlays_html, search_overlays_html}; @use crate::BaseData; @(data: BaseData) @:head_html(&data)
@:input_dropdown_html(&data)
@:search_overlays_html(&data)
@:page_overlays_html(&data)
@:footer_html(&data, true) ================================================ FILE: lib/frontend/templates/error_page.rs.html ================================================ @use crate::web_error::InfoText; @(status_code: u16, info: InfoText) Jotoba

@status_code


ごめんなさい
@info.primary @info.secondary
Go back or create an issue on our Github page:
================================================ FILE: lib/frontend/templates/functional/render_sentence.rs.html ================================================ @use jp_utils::furi::segment::SegmentRef; @use jp_utils::furi::segment::AsSegment; @* TODO Figure out how to use Iterator instead of Vec for this template. *@ @type MyVec<'a> = Vec>; @(iter: MyVec, addl_classes: &str, show_furigana: bool)
@* *@@for furi_part in iter {@* *@@for r in furi_part.reading_iter() {@* *@@if !furi_part.is_empty() {@* *@@if furi_part.is_kanji() {@* *@@* *@@* *@@r.kanji().unwrap()@* *@@* *@@if show_furigana {@* *@@* *@@* *@@r.kana()@* *@@* *@@* *@}@* *@@* *@} else {@* *@@* *@@r.kanji().unwrap_or_else(|| r.kana())@* *@@* *@}@* *@}@* *@}@* *@}@* *@
================================================ FILE: lib/frontend/templates/overlays/info/collocations.rs.html ================================================ @use crate::BaseData; @use types::jotoba::words::Word; @use crate::{templ_utils::*}; @(data: &BaseData, word: &Word) ================================================ FILE: lib/frontend/templates/overlays/info/definitions_jp.rs.html ================================================ @use crate::BaseData; @use types::jotoba::words::Word; @(_data: &BaseData, word: &Word) ================================================ FILE: lib/frontend/templates/overlays/info/inflections.rs.html ================================================ @use crate::BaseData; @use types::jotoba::words::{Word, inflection::Inflections}; @(data: &BaseData, word: &Word, inflections: &Inflections) ================================================ FILE: lib/frontend/templates/overlays/mobile_overlays.rs.html ================================================ @use crate::BaseData; @(_data: &BaseData) ================================================ FILE: lib/frontend/templates/overlays/page/decomposition_graph.rs.html ================================================ @use crate::BaseData; @(_data: &BaseData) ================================================ FILE: lib/frontend/templates/overlays/page/image_crop.rs.html ================================================ @use crate::BaseData; @(data: &BaseData) ================================================ FILE: lib/frontend/templates/overlays/page/loading.rs.html ================================================ @use crate::BaseData; @(_data: &BaseData)
================================================ FILE: lib/frontend/templates/overlays/page/settings.rs.html ================================================ @use crate::BaseData; @(data: &BaseData) ================================================ FILE: lib/frontend/templates/overlays/page_overlays.rs.html ================================================ @use super::page::{image_crop_html, loading_html, settings_html}; @use crate::BaseData; @(data: &BaseData) @:image_crop_html(data) @:loading_html(data) @:settings_html(data) ================================================ FILE: lib/frontend/templates/overlays/search_overlays.rs.html ================================================ @use super::searchbar::{image_input_html, radicals_html, speech_html, suggestions_html}; @use crate::BaseData; @(data: &BaseData) @:image_input_html(data) @:radicals_html(data) @:speech_html(data) @:suggestions_html(data) ================================================ FILE: lib/frontend/templates/overlays/searchbar/image_input.rs.html ================================================ @use crate::BaseData; @(data: &BaseData) ================================================ FILE: lib/frontend/templates/overlays/searchbar/radicals.rs.html ================================================ @use crate::BaseData; @(data: &BaseData) ================================================ FILE: lib/frontend/templates/overlays/searchbar/speech.rs.html ================================================ @use crate::BaseData; @(data: &BaseData) ================================================ FILE: lib/frontend/templates/overlays/searchbar/suggestions.rs.html ================================================ @use crate::BaseData; @(_data: &BaseData) ================================================ FILE: lib/frontend/templates/pages/about.rs.html ================================================ @use crate::BaseData; @(data: &BaseData)

@data.gettext("About")

@data.gettext("Jotoba is a multilingual Japanese dictionary. It is easy to find translations for words or kanji, see example sentences and the way names can be written.")

@data.gettext("Jotoba is open source. Check out our") @data.gettext("Github page") @data.gettext("if you want to contribute or host Jotoba yourself.")
@data.gettext("Check out our") @data.gettext("Trello Board") @data.gettext("aswell if you are interested in upcoming features and what we are currently working on!")

@data.gettext("Data Sources and Inspiration")

@data.gettext("Of course this project wouldn't have been possible without the help of some great data sources.")
@data.gettext("Many thanks to every one of them for providing such a variety of data people can use to learn the japanese language.")

@data.gettext("Joto-kun")

@data.gettext("Joto-kun was created by a good friend of ours who is truly a wizard when it comes down to design!")


@data.gettext("Jisho")

@data.gettext("Jisho, created by Kim Ahlström, Miwa Ahlström and Andrew Plummer is a pretty and powerful english-japanese dictionary.")
@data.gettext("We took inspiration in their work and design to improve on their concept and offer it to a wider variety of people.")


@data.gettext("Words") & @data.gettext("Kanji") & @data.gettext("Names")

@data.gettext("Words (except sound effects), Kanji and Names available on this site are publicly provided and maintained by")
EDRDG (Electronic Dictionary Research and Development Group) @data.gettext("and available under the license")
Creative Commons Attribution-ShareAlike Licence (V3.0).
@data.gettext("Additionally, the RADKFILE by Jin Breen is used to link Radicals to Kanji.")


@data.gettext("Audio Files") #1 @data.gettext("and") @data.gettext("Audio Files") #2

@data.gettext("The audio files #1 were graciously made public by") @data.gettext("WaniKani") & @data.gettext("Tofugo") @data.gettext("and uploaded to Github under the CC-BY-4.0 licence.")
@data.gettext("The audio files #2 are provided by the") @data.gettext("Kanji alive project") @data.gettext("and are also available under the CC-BY-4.0 license.")


@data.gettext("Manga Sound Effects")

@data.gettext("The data about Sound Effects is graciously provided by Chris Kincaid and is used as additional data in the word search.")


@data.gettext("Sentences")

@data.gettext("Sentences are provided by Tatoeba under the Creative Commons CC 1.0 and 2.0 licences.")


@data.gettext("Kanji Animations")

@data.gettext("The raw data used for kanji animations is publicly provided by KanjiVG, a project by Ulrich Apel.")
@data.gettext("The conversion into images and animated SVG is done by a ruby script which was made by") @data.gettext("Kimtaro") @data.gettext("and altered by") @data.gettext("Yukáru").


@data.gettext("JLPT Data")

@data.gettext("Data about JLPT proficiencies are by provided by Jonathan Waller.")
@data.gettext("There is also some non-free data available on his website, so check it out if you are interested.")


@data.gettext("Word tokenization")

@data.gettext("Word tokenization is done using UniDic, by the UniDic Consortium and used for Japanese morphological analysis implementations.")


@data.gettext("Radicals")

@data.gettext("Data about Radicals used in specific Kanji are provided by Kanjium.")
@data.gettext("On the project's Github Page you can find lots of data about Kanji.")


@data.gettext("Pitch accents")

@data.gettext("Pitch accent data has been extracted from UniDic.")

================================================ FILE: lib/frontend/templates/pages/info.rs.html ================================================ @use crate::BaseData; @(data: &BaseData)

@data.gettext("Shortcuts")

@data.gettext("To improve the quality of life on Jotoba, we offer some shortcuts to quickly navigate the page:")

@data.gettext("Everywhere")
w | s | n | k @data.gettext("Quickly change between words | sentences | names | kanji tabs")
/ @data.gettext("Focus the search bar")
@data.gettext("Focussed search bar")
↑ | ↓ @data.gettext("Iterate suggestions up | down")
[Tab] @data.gettext("Iterate suggestions down")
@data.gettext("[Words] search")
p @data.gettext("Play the first possible audio")
@data.gettext("[Kanji] search")
c @data.gettext("Show / Collapse compounds")

@data.gettext("Hashtags")

@data.gettext("To specify what kind of results your search should offer, you can use shortcuts.")
@data.gettext("Hashtags should be written at end end of your input like this:") start #noun

@data.gettext("Available Hashtags for [Words] search")+
#noun @data.gettext("Search for nouns")
#verb @data.gettext("Search for verbs")
#transitive @data.gettext("Search for transitive verbs")
#intransitive @data.gettext("Search for intransitive verbs")
#adverb @data.gettext("Search for adverbs")
#auxilary @data.gettext("Search for auxilary verbs")
#adjective @data.gettext("Search for adjectives")
#pronoun @data.gettext("Search for pronouns")
#conjungation @data.gettext("Search for conjugations")
#prefix @data.gettext("Search for prefixes")
#suffix @data.gettext("Search for suffixes")
#particle @data.gettext("Search for japanese particles")
#Irregular-Ichidan @data.gettext("Lists iru/eru ending verbs which are conjugated as godan verbs")
#sfx @data.gettext("Search for sfx words [comic sounds]")
#counter @data.gettext("Search for words used for counting")
#expression @data.gettext("Search for expressions")
#interjection @data.gettext("Search for words used as interjections")
#numeric @data.gettext("Search for numeric words")
#abbreviation @data.gettext("Search for abbreviations")
#katakana @data.gettext("Search for katakana words")
#unclassified @data.gettext("Search for words that don't fit in any category")
#N [1-5] @data.gettext("Search for words included in the specific JLPT level")
#word @data.gettext("Search in the [words] category")
#sentence @data.gettext("Search in the [sentences] category")
#name @data.gettext("Search in the [names] category")
#kanji @data.gettext("Search in the [kanji] category")
@data.gettext("Available Hashtags for [Sentence] search")
#N [1-5] @data.gettext("Search for sentences included in the specific JLPT level")
#hidden @data.gettext("Hide translations by default to translate them yourself and check if it's correct")
@data.gettext("Available Hashtags for [Kanji] search")
#Genki [3-23] @data.gettext("Search for kanji included in the specific Genki chapter")

@data.gettext("Radical search")

@data.gettext("The radical picker allows searching for radicals to make the process of picking radicals even faster. The supported inputs are as follows:")

Kanji @data.gettext("Results in all radicals used to build given kanji characters")
Kana @data.gettext("Searches in words for the given query and returns the radicals of the matching results")
Romaji @data.gettext("Tries to find the given query in radicals names, otherwise does a word search and returns the result's kanji")
================================================ FILE: lib/frontend/templates/pages/kanji.rs.html ================================================ @use search::kanji::result::Item; @use crate::BaseData; @use super::search_help; @use crate::templ_utils::*; @use crate::templates::overlays::page::{decomposition_graph_html}; @(data: &BaseData, kanji: Vec) @if kanji.is_empty() { @:search_help(&data, data.gettext("kanji").as_str()) } @:decomposition_graph_html(data) @for (kpos, k_item) in kanji.iter().enumerate() { @if kpos > 0 {
}
@k_item.kanji.literal
@data.ngettext_fmt("{} stroke", k_item.kanji.stroke_count as u64, &[k_item.kanji.stroke_count])
@data.gettext("Radical"):
@k_item.get_radical() @if k_item.get_rad_len() > 7 {
} @if let Some(ref translations) = k_item.kanji.radical.translations { (@translations.join(", ")) }
@if !k_item.kanji.parts.is_empty() {
@data.ngettext("Part", k_item.get_parts_count() as u64):
@for (pos, part) in k_item.kanji.parts.iter().enumerate() { @if pos > 0 { , } @part }
}
@k_item.kanji.meanings.join(", ")
@if !k_item.kanji.kunyomi.is_empty() {
@data.gettext("Kun"):
@for (pos, kun) in k_item.kanji.kunyomi.iter().enumerate() { @if pos > 0 {
,
} @kun }
} @if !k_item.kanji.onyomi.is_empty() {
@data.gettext("On"):
@for (pos, on) in k_item.kanji.onyomi.iter().enumerate() { @if pos > 0 {
,
} @on }
}
@if let Some(grade) = k_item.kanji.grade {
@data.gettext_fmt("Taught in {} grade", &[grade])
} @if let Some(n_lvl) = k_item.kanji.jlpt {
@data.gettext("JLPT level") N@n_lvl
} @if let Some(frequency) = k_item.kanji.frequency {
@frequency @data.gettext("of 2500 most used kanji in newspapers")
}
@if !k_item.kanji.nanori.is_empty() {
@data.gettext("Japanese names"): @for (pos, n) in k_item.kanji.nanori.iter().enumerate() { @if pos > 0 {
,
} @n }
} @if !k_item.kanji.similar_kanji.is_empty() {
@data.gettext("Similar Kanji"): @for (pos, n) in k_item.kanji.similar_kanji.iter().enumerate() { @if pos > 0 {
,
} @n }
} @if !k_item.kanji.chinese.is_empty() {
@data.gettext("Chinese reading"): @k_item.kanji.chinese.join(", ")
} @if let Some(ref korean) = k_item.get_korean() {
@data.gettext("Korean reading"):   @korean.join(", ")
} @if !k_item.kanji.vietnamese.is_empty() {
@data.gettext("Vietnamese reading"):   @k_item.kanji.vietnamese.join(", ")
}
@if k_item.kanji.has_stroke_frames(data.assets_path()) {
@unescaped_string(k_item.get_frames(data.assets_path()).unwrap_or_default())
} @if k_item.kanji.has_animation_file(data.assets_path()) {
Animation Speed:
100%
@unescaped_string(k_item.get_animation(data.assets_path()).unwrap_or_default())
} @if k_item.kanji.has_compounds() {
@if data.kanji_copounds_collapsed() {
}else{
}
}else{
} @if data.kanji_copounds_collapsed() {
} ================================================ FILE: lib/frontend/templates/pages/names.rs.html ================================================ @use types::jotoba::names::Name; @use super::search_help; @use crate::templ_utils::get_types_humanized; @use crate::BaseData; @(data: &BaseData, names: Vec<&Name>)
@if names.is_empty() { @:search_help(&data, data.gettext("names").as_str()) } @for name in names.iter() {
@if let Some(ref kanji) = name.kanji {
@kanji
}
@if name.kanji.is_some() { 【@name.kana】 } else { @name.kana }
@data.gettext("Full name")
@name.transcription
@if name.name_type.is_some() { @if let Some(gender) = name.get_gender() {
@data.gettext("Sex")
@data.gettext(gender)
} @if name.has_non_gender_tags() {
@data.gettext("Name origin")
@get_types_humanized(name, data.dict, data.get_lang())
} }

}
================================================ FILE: lib/frontend/templates/pages/news.rs.html ================================================ @use crate::BaseData; @use news::NewsEntry; @type News = Vec; @(data: &BaseData, news: News)
================================================ FILE: lib/frontend/templates/pages/search_help.rs.html ================================================ @use crate::BaseData; @(data: &BaseData, title: &str)

@data.gettext_fmt("No {} found", &[title])

@if let Some(search_help) = data.get_search_help() { @if !search_help.other_langs.is_empty() {

@data.gettext("Found in multiple other languages")

@for (lang, l_code) in search_help.iter_langs() { - @data.gettext(lang) } }

@data.gettext("Found in other search types")

@for (query_type, guess) in search_help.iter_items() { } }

@data.gettext("Search Help")

  • @data.gettext("Your default search language might not fit your input")
  • @data.gettext("Check your search for typos")
  • @data.gettext("Use more generic search terms")
  • @data.gettext("Try finding your search in a different category using") w / s / n / k
  • @data.gettext("Your search request might not be included in our database yet")

@data.gettext("If you think your search should be contained in our database, submit an issue on") Github.
@data.gettext("Also check our") Trello Board @data.gettext("since we might be working on it!")

================================================ FILE: lib/frontend/templates/pages/sentences.rs.html ================================================ @use search::sentence::result::{Sentence, ResData}; @use search::executor::search_result::SearchResult; @use super::search_help; @use crate::templates::functional::{render_sentence_html}; @use crate::BaseData; @(data: &BaseData, sentences: SearchResult)
@if sentences.items.is_empty() { @:search_help(&data, data.gettext("sentences").as_str()) } @for sentence in sentences.items {
@:render_sentence_html(sentence.furigana_pairs(), "small", data.user_settings.sentence_furigana)
@sentence.translation
@if let Some(ref english) = sentence.get_english() {
@english
} @if sentences.other_data.hidden {
[@data.gettext("show")]
}

}
================================================ FILE: lib/frontend/templates/pages/words.rs.html ================================================ @use search::word::result::{selected, AddResData}; @use types::jotoba::words::Word; @use types::jotoba::language::Language; @use search::executor::search_result::SearchResult; @use search::query::Query; @use jp_utils::furi::parse::FuriParser; @use crate::templ_utils::*; @use super::{search_help}; @use crate::templates::functional::{render_sentence_html}; @use crate::templates::overlays::info::*; @use crate::BaseData; @(data: &BaseData, query: &Query, result: SearchResult) @if let Some(ref inflection) = result.inflection {
@data.ngt_search_links("{} could be an inflection of {}, with this form:", 1, &[&result.other_data.raw_query, &inflection.lexeme], inflection.inflections.len() as u64)
@for inflection in inflection.inflections.iter() {
  • @data.gettext(*inflection)
  • }
    @if result.has_sentence() {
    }
    } @if let Some(sentence_parts) = result.sentence_parts() {
    @for part in sentence_parts.iter() { @if let Some(furigana) = part.furigana() { @:render_sentence_html(FuriParser::new(furigana).to_vec().unwrap(), "", true) } else {
    @part.get_inflected()  
    } }
    @for part in sentence_parts.iter() { @if let Some(ref info) = part.word_class() {
    @data.gettext(*info)
    } }

    }
    @* @if let Some(new_lang) = result.changed_lang { @data.gettext_fmt("Temporarily switched language to {}", &[data.gettext(new_lang)]) }*@ @if !result.is_empty() {

    @data.gettext("Words")

    } else { @:search_help(&data, data.gettext("words").as_str()) } @for word in result.items.iter() { @if word.get_reading().len() > 3 {
    @if let Some(s_pairs) = word.get_furigana() { @:render_sentence_html(s_pairs, "", true) } else {
    @word.get_reading().reading
    }
    }
    @if word.get_reading().len() <= 3 {
    @if let Some(s_pairs) = word.get_furigana() { @:render_sentence_html(s_pairs, "", true) } else {
    @word.get_reading().reading
    }
    } @if word.is_common() {
    @data.gettext("common word")
    } @if let Some(lvl) = word.jlpt_lvl {
    @data.gettext_fmt("JLPT N{}", &[lvl])
    } @if let Some(audio) = word.audio_file(data.config.server.get_audio_files()) { @data.gettext("Play audio") }
      @if word.get_inflections().is_some() {
    • @data.gettext("Show Conjugations")

    • } @if let Some(ref collocations) = word.collocations {
    • @data.ngettext("Show collocation", collocations.len() as u64)

    • } @* @if
    • @data.gettext("Japanese definitions")

    • @if_end *@ @if let Some(intransitive) = get_intransitive_counterpart(&word) {
    • @data.gettext("Intransitive word")

    • } @if let Some(transitive) = get_transitive_counterpart(&word) {
    • @data.gettext("Transitive word")

    • } @if word.has_sentence(data.user_settings.user_lang) || (data.user_settings.show_english && word.has_sentence(Language::English)) {
    • @data.gettext("Sentence search")
    • } @if let Some(audio) = word.audio_file(data.config.server.get_audio_files()) {

    • @data.gettext("Download audio")
    • } @if data.config.is_debug() {

    • Seq: @word.sequence
    • }

    • @data.gettext("Direct reference")
    @for (spos, senses_compound) in word.get_senses_orderd(query.settings.english_on_top, query.settings.user_lang).iter().enumerate() { @for (pos, sense) in senses_compound.iter().enumerate() { @if !sense.glosses.is_empty() {
    @sense.get_parts_of_speech(&data.dict, data.get_lang())
    @(pos+1).
    @sense.get_glosses()
    @if let Some((info, xref, antonym, dialect, gairaigo)) = sense.get_infos(&data.dict, data.get_lang()) {
    @if let Some(ref gro) = gairaigo { @gro } @if let Some(ref info) = info { @if gairaigo.is_some() { . } @info } @if let Some(ref dialect) = dialect { @data.gettext_custom(*dialect) } @if let Some(ref antonym) = antonym { @data.gt_search_link("Antonym of {}", antonym) } @if let Some(ref xref) = xref { @if antonym.is_some() || dialect.is_some() { . } @data.gt_search_link("See also {}", xref) }
    } @if query.settings.show_example_sentences { @if let Some((furi, translation)) = ext_sentence(sense, &query.settings.user_lang) {
    @translation
    } }
    } } @if spos == 0 && !word.get_senses_with_en()[1].is_empty() && !senses_compound.is_empty() {
    } } @if let Some(pitch) = word.get_first_pitch() {
    @data.gettext("Pitch accent")
    @for (classes, kana) in pitch.render() { @kana }
    } @if !word.reading.alternative.is_empty() {
    @data.gettext("Other forms")
    @word.alt_readings_beautified()
    }
    @if word.has_collocations() { @:collocations_html(&data, &word) } @if let Some(inflections) = word.get_inflections() { @:inflections_html(&data, &word, &inflections) } @* @if @@_get_me_some_html! @:definitions_jp(&data, &word) @if_end *@
    }
    ================================================ FILE: lib/frontend/templates/subtemplates/footer.rs.html ================================================ @use super::{paginator}; @use crate::BaseData; @use resources::GIT_HASH; @(data: &BaseData, show_ref: bool)
    @:paginator(data) @if show_ref {
    Donations will always be shared between the developers!
    }
    @data.gettext("Jotoba wouldn't be able to exist without the help of many open-source data sources.")
    @data.gettext("Check out the") @data.gettext("About Page") @data.gettext("for a list of all contributors in this project.")
    @if data.config.is_debug() {

    Git hash: @(GIT_HASH) }
    ================================================ FILE: lib/frontend/templates/subtemplates/head.rs.html ================================================ @use crate::BaseData; @(data: &BaseData) @data.gettext("Jotoba") @if let Some(tags) = data.get_og_tags() { @tags.render_unescaped() } ================================================ FILE: lib/frontend/templates/subtemplates/input_dropdown.rs.html ================================================ @use crate::BaseData; @(data: &BaseData)
    @data.get_search_site_name()
    @data.gettext("Words")
    @data.gettext("Kanji")
    @data.gettext("Sentences")
    @data.gettext("Names")
    ================================================ FILE: lib/frontend/templates/subtemplates/main_body.rs.html ================================================ @use crate::templates::pages::{words_html, kanji_html, names_html, sentences_html, about_html, info_html, news_html}; @use crate::{BaseData, ResultData, Site}; @(data: &BaseData)

    @match data.site.clone() { Site::SearchResult(search_result) => { @match search_result.result { ResultData::Word(result) => { @:words_html(&data, &search_result.query, result) } ResultData::KanjiInfo(result) => { @:kanji_html(&data, result) } ResultData::Name(result) => { @:names_html(&data, result) } ResultData::Sentence(result) => { @:sentences_html(&data, result) } } } Site::Index => { @*@:index_html(&data)*@ } Site::About => { @:about_html(&data) } Site::InfoPage => { @:info_html(&data) } Site::News(news) => { @:news_html(&data, news) } }
    ================================================ FILE: lib/frontend/templates/subtemplates/paginator.rs.html ================================================ @use crate::BaseData; @(data: &BaseData) @if let Some(pagination) = data.pagination { } ================================================ FILE: lib/indexes/Cargo.toml ================================================ [package] name = "indexes" version = "0.1.0" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] types = { path = "../types" } autocompletion = { git = "https://github.com/WeDontPanic/AutoCompletionFramework" } #autocompletion = { path = "../../../AutoCompletionFramework" } bktree = { git = "https://github.com/JojiiOfficial/bktree" } serde = { version = "1.0.171", features = ["derive"] } byteorder = "1.4.3" bitflags = { git = "https://github.com/JojiiOfficial/BitFlags" } once_cell = { version = "1.18.0", default-features = false } bincode = "1.3.3" log = "0.4.19" rayon = { version = "1.7.0", optional = true } qp-trie = { git = "https://github.com/sdleffler/qp-trie-rs", features = [ "serde", ] } #ngindex = { path = "../../../ngindex" } ngindex = { git = "https://github.com/JojiiOfficial/ngindex" } #vsm = { path = "../../../vsm"} vsm = { git = "https://github.com/JojiiOfficial/VSM"} index_framework = { git = "https://github.com/WeDontPanic/index_framework"} ngram-tools = { git = "https://github.com/JojiiOfficial/ngram-tools"} #sparse_vec = { path = "../../../sparse_vec"} sparse_vec = { git = "https://github.com/JojiiOfficial/SparseVec"} num-traits = "0.2.15" [dev-dependencies] test-case = '*' [features] default = [] parallel = ["rayon"] ================================================ FILE: lib/indexes/src/hashtag.rs ================================================ use ngindex::{ index_framework::retrieve::retriever::{ngram::NGramRetriever, Retriever}, NgramIndex, }; use qp_trie::{wrapper::BString, Trie}; use 
serde::{Deserialize, Serialize}; use types::jotoba::{indexes::hashtag::RawHashtag, search::SearchTarget}; /// Index for hashtag auto completion #[derive(Deserialize, Serialize)] pub struct HashTagIndex { tags: Vec, pub index: NgramIndex<2, u32>, trie: Trie, } impl HashTagIndex { /// Create a new HashTagIndex pub fn new(tags: Vec, index: NgramIndex<2, u32>, trie: Trie) -> Self { Self { tags, index, trie } } #[inline] pub fn get(&self, pos: usize) -> Option<&RawHashtag> { self.tags.get(pos) } #[inline] pub fn get_filtered(&self, pos: usize, s_targets: &[SearchTarget]) -> Option<&RawHashtag> { let tag = self.get(pos)?; if s_targets.iter().any(|i| tag.s_targets.contains(i)) || s_targets.is_empty() { return Some(tag); } None } #[inline] pub fn trie_search(&self, query: &str, s_targets: &[SearchTarget]) -> Vec<&RawHashtag> { let id = self.trie.subtrie_str(&query.to_lowercase()); let mut out = vec![]; for (_, id) in id.iter() { if let Some(v) = self.get_filtered(*id as usize, s_targets) { out.push(v); } } out } pub fn ngram_search(&self, query: &str, s_targets: &[SearchTarget]) -> Vec<(&RawHashtag, f32)> { let mut posts: Vec<_> = s_targets.iter().map(|i| i.get_type_id() as u32).collect(); if posts.is_empty() { posts = vec![0, 1, 2, 3]; } let retr = self .index .retriever_for(query) .in_postings(posts) .unique() .get::>(); let q = retr.q_term_ids().to_vec(); let mut out = retr .filter_map(|i| { let item = self.get(*i.item() as usize)?; let dice = i.terms().dice_weighted(&q, 0.5); Some((item, dice)) }) .filter(|i| i.1 > 0.2) .collect::>(); out.sort_by(|a, b| a.1.total_cmp(&b.1).reverse()); out } } ================================================ FILE: lib/indexes/src/kanji/mod.rs ================================================ pub mod reading; pub mod reading_freq; ================================================ FILE: lib/indexes/src/kanji/reading.rs ================================================ use index_framework::backend::memory::presets::Simple; // Index shortcuts 
pub type Index = Simple; ================================================ FILE: lib/indexes/src/kanji/reading_freq/k_freq_item.rs ================================================ use super::reading::ReadingFreq; use serde::{Deserialize, Serialize}; /// All reading data for a single kanji #[derive(Serialize, Deserialize, Debug)] pub struct KFreqItem { pub readings: Vec, pub total: usize, } impl KFreqItem { /// Creates a new Kanji frequency item with the provided readings pub fn new(readings: Vec) -> Self { let readings = readings .into_iter() .map(|i| ReadingFreq::new(i)) .collect::>(); Self { readings, total: 0 } } /// Get the total amount of counted readings for a kanji #[inline] pub fn total(&self) -> usize { self.total } /// Increase the total value of counted readings for a kanji #[inline] pub fn inc_total(&mut self, add: usize) { self.total += add } /// Returns `true` if the kanji readings are completely empty pub fn is_empty(&self) -> bool { self.readings.is_empty() || (self.readings.iter().all(|i| i.is_empty()) && self.total == 0) } /// Gets all reading freq items that match the given matcher #[inline] pub fn get_readings<'a, F: Fn(&str) -> bool>( &'a self, r: F, ) -> impl Iterator { self.readings.iter().filter(move |i| r(&i.reading)) } /// Gets a reading freq item with the given string #[inline] pub fn get_reading>(&self, s: S) -> Option<&ReadingFreq> { self.readings.iter().find(|i| i.reading == s.as_ref()) } } ================================================ FILE: lib/indexes/src/kanji/reading_freq/mod.rs ================================================ pub mod k_freq_item; pub mod reading; use self::k_freq_item::KFreqItem; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use types::jotoba::kanji::Kanji; /// An index that can hold kanji along with their various readings which this lovely language /// 'supports'. Each reading entry has a frequency assigned how often it occurrs for the given /// Kanji. 
#[derive(Serialize, Deserialize)] pub struct FrequencyIndex { pub data: HashMap, } impl FrequencyIndex { /// Create a new FrequencyIndex with a given set of kanji that will be supported pub fn new(all_kanji: &[Kanji]) -> FrequencyIndex { let mut data = HashMap::new(); for kanji in all_kanji { let mut readings = vec![]; let on = kanji.onyomi.clone(); let kun = kanji.kunyomi.clone(); for reading in on.into_iter().chain(kun.into_iter()) { readings.push(reading); } data.insert(kanji.literal, KFreqItem::new(readings)); } FrequencyIndex { data } } /// Inserts a new reading for the given kanji. All readings of the kanji for those `matches` /// returns `true` will be incremented pub fn add_reading(&mut self, kanji_lit: char, matches: F) -> bool where F: Fn(&str) -> bool, { let entry = match self.data.get_mut(&kanji_lit) { Some(s) => s, None => return false, }; let c = entry .readings .iter_mut() .filter(|i| matches(&i.reading)) .map(|i| i.count += 1) .count(); if c == 0 { return false; } // We're passing one reading. 
If there are multiple entries for one single entry, // they're treated equally, so we're counting up all matches but only counting one // total entry.inc_total(1); true } /// Removes all empty items from the index pub fn clear(&mut self) { self.data.retain(|_, v| !v.is_empty()); } /// Returns a FreqData for the kanji `c` #[inline] pub fn get(&self, c: char) -> Option<&KFreqItem> { self.data.get(&c) } /// Returns the normalized frequency for `reading` #[inline] pub fn norm_reading_freq(&self, kanji: char, reading: &str) -> Option { self.norm_reading_freq_th(kanji, reading, 200) } /// Returns the normalized frequency for `reading` #[inline] pub fn norm_reading_freq_th(&self, kanji: char, reading: &str, th: usize) -> Option { let freq_data = self.data.get(&kanji)?; let read_freq = freq_data.get_reading(reading)?.count; if freq_data.total() < th { return None; } Some(read_freq as f32 / freq_data.total() as f32) } } ================================================ FILE: lib/indexes/src/kanji/reading_freq/reading.rs ================================================ use serde::{Deserialize, Serialize}; /// Reading and its frequency #[derive(Serialize, Deserialize, Debug, Default, PartialEq, Ord, Eq)] pub struct ReadingFreq { pub reading: String, pub count: u32, } impl ReadingFreq { /// Creates a new Reading #[inline] pub fn new(reading: String) -> Self { Self { reading, count: 0 } } /// Increment the reading #[inline] pub fn inc(&mut self, c: u32) { self.count += c; } #[inline] pub fn is_empty(&self) -> bool { self.count == 0 } } impl PartialOrd for ReadingFreq { #[inline] fn partial_cmp(&self, other: &Self) -> Option { self.count.partial_cmp(&other.count) } } ================================================ FILE: lib/indexes/src/lib.rs ================================================ pub mod hashtag; pub mod kanji; pub mod names; pub mod ng_freq; pub mod radical; pub mod regex; pub mod sentences; pub mod storage; pub mod term_freq; pub mod words; pub use storage::{get, 
suggestions::get_suggestions};
================================================
FILE: lib/indexes/src/names.rs
================================================
pub const FOREIGN_NGRAM: usize = 3;
// NOTE(review): the generic argument of `NgramIndex` appears to have been stripped
// by extraction (likely `NgramIndex<FOREIGN_NGRAM>`/`NgramIndex<NATIVE_NGRAM>`);
// confirm against the `ngindex` crate before relying on this.
pub type ForeignIndex = ngindex::NgramIndex;

pub const NATIVE_NGRAM: usize = 3;
pub type NativeIndex = ngindex::NgramIndex;

================================================
FILE: lib/indexes/src/ng_freq.rs
================================================
use ngram_tools::iter::wordgrams::Wordgrams;
use serde::{Deserialize, Serialize};
use sparse_vec::{SpVec32, VecExt};

use crate::term_freq::{TermFreqIndex, VecBuilder};

/// Wrapper around a term frequency index counting ngrams of terms instead of the terms itself.
#[derive(Serialize, Deserialize)]
pub struct NgFreqIndex {
    /// N-gram length used when splitting terms.
    n: usize,
    /// Underlying frequency index over the generated ngrams.
    index: TermFreqIndex,
}

impl NgFreqIndex {
    /// Creates a new, empty index that splits inserted terms into `n`-grams.
    pub fn new(n: usize) -> Self {
        let index = TermFreqIndex::new();
        Self { n, index }
    }

    /// Returns the amount of indexed terms (distinct ngrams).
    #[inline]
    pub fn len(&self) -> usize {
        self.index.len()
    }

    /// Returns `true` if nothing has been indexed yet.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Drops low-frequency ngrams from the underlying index to reduce memory usage.
    pub fn compress(&mut self, threshold: usize) {
        self.index.compress(threshold)
    }

    /// Splits `gloss` into padded ngrams and counts each of them.
    /// Empty or whitespace-only input is ignored.
    pub fn insert(&mut self, gloss: &str) {
        if gloss.trim().is_empty() {
            return;
        }

        let padded = self.get_padded(gloss);
        let n = Self::n_for(gloss, self.n);
        let ngrams = Wordgrams::new(&padded, n);
        for ngram in ngrams {
            self.index.insert(ngram.to_string());
        }
    }

    /// Builds a log-idf weighted sparse vector for `inp`, assigning consistent
    /// fresh IDs to out-of-vocabulary ngrams through `builder`.
    #[inline]
    pub fn build_vec_cntx<A: AsRef<str>>(&self, builder: &mut VecBuilder, inp: A) -> SpVec32 {
        self.build_custom_vec_cntx(builder, inp, |freq, tot| (tot / freq).log2())
    }

    /// Builds a log-idf weighted sparse vector for `inp`.
    #[inline]
    pub fn build_vec<A: AsRef<str>>(&self, inp: A) -> SpVec32 {
        self.build_custom_vec(inp, |freq, tot| (tot / freq).log2())
    }

    /// Returns a `VecBuilder` bound to this index for context-aware vector building.
    #[inline]
    pub fn vec_builder(&self) -> VecBuilder {
        VecBuilder::new(&self.index)
    }

    /// Builds a weighted sparse vector for `inp` using `inv_freq(term_freq, total)`
    /// as weighting function. Returns an empty vector for blank input.
    pub fn build_custom_vec<A, F>(&self, inp: A, inv_freq: F) -> SpVec32
    where
        A: AsRef<str>,
        F: Fn(f32, f32) -> f32,
    {
        if inp.as_ref().trim().is_empty() {
            return SpVec32::default();
        }

        let inp = inp.as_ref();
        let padded = self.get_padded(inp);
        let n = Self::n_for(inp, self.n);

        let mut no_hit_counter = 0;
        let ng_ids: Vec<_> = Wordgrams::new(&padded, n)
            .map(|i| {
                // Out-of-vocab ngrams get IDs above the indexed ID space:
                // `total` counts every insertion and is therefore >= the highest
                // assigned term ID, so these synthetic IDs cannot collide.
                let id = self.index.t_ids.get(i).copied().unwrap_or_else(|| {
                    no_hit_counter += 1;
                    self.index.total as u32 + no_hit_counter
                });
                //let freq = self.index.inv_freq_oov(i);
                let t_freq = self.index.freq_by_id(id).unwrap_or(1) as f32;
                let weight = (inv_freq)(t_freq, self.index.total as f32);
                (id, weight)
            })
            .collect();

        // NOTE(review): dimensions are pushed in ngram order, not sorted by ID;
        // confirm `create_new_raw` does not require sorted dimensions.
        SpVec32::create_new_raw(ng_ids)
    }

    /// Like [`Self::build_custom_vec`] but resolves out-of-vocabulary ngrams
    /// through `builder`, so multiple vectors built with the same builder agree
    /// on the IDs of unseen ngrams.
    pub fn build_custom_vec_cntx<A, F>(
        &self,
        builder: &mut VecBuilder,
        inp: A,
        inv_freq: F,
    ) -> SpVec32
    where
        A: AsRef<str>,
        F: Fn(f32, f32) -> f32,
    {
        if inp.as_ref().trim().is_empty() {
            return SpVec32::default();
        }

        let inp = inp.as_ref();
        let padded = self.get_padded(inp);
        let n = Self::n_for(inp, self.n);

        let ng_ids: Vec<_> = Wordgrams::new(&padded, n)
            .map(|i| {
                let id = builder.get_or_insert_id(i);
                let t_freq = self.index.freq_by_id(id).unwrap_or(1) as f32;
                let weight = (inv_freq)(t_freq, self.index.total as f32);
                (id, weight)
            })
            .collect();

        SpVec32::create_new_raw(ng_ids)
    }

    /// Clamps `n` to the input length so short inputs still yield an ngram.
    /// NOTE(review): `inp.len()` is the byte length, not the char count — for
    /// multi-byte (e.g. Japanese) input this clamps later than expected; confirm intended.
    #[inline]
    fn n_for(inp: &str, n: usize) -> usize {
        n.min(inp.len())
    }

    /// Pads `inp` on both sides with `n - 1` padding characters for edge ngrams.
    #[inline]
    fn get_padded(&self, inp: &str) -> String {
        let n = Self::n_for(inp, self.n);
        ngram_tools::padding(inp, n - 1)
    }
}

// TODO: Put this function into some lib (maybe sparse vector?)
#[inline] pub fn term_dist(a: &SpVec32, b: &SpVec32) -> f32 { if a.is_empty() || b.is_empty() { return 0.0; } let both = a .intersect_iter(b) .map(|(_, a_w, b_w)| a_w + b_w) .sum::(); let sum = a.weights().chain(b.weights()).sum::(); both / sum } #[cfg(test)] mod test { use test_case::test_case; use super::*; #[test_case("musik", 1)] #[test_case("musik", 2)] #[test_case("musik", 3)] #[test_case("ki", 1)] #[test_case("ki", 2)] #[test_case("ki", 3)] fn test_single(term: &str, n: usize) { let mut ngindex = NgFreqIndex::new(n); ngindex.insert(term); let music_vec = ngindex.build_vec(term); let term_len = term.len(); // Check proper length of vectors let pad_len = n.saturating_sub(1); let tot_len = pad_len * 2 + term_len; if term_len < n { assert_eq!(music_vec.dim_count(), tot_len - n); } else { assert_eq!(music_vec.dim_count(), tot_len - n + 1); } } #[test] fn test_freq() { let mut ngindex = NgFreqIndex::new(2); ngindex.insert("huhu"); let freq = ngindex.index.freq("hu"); assert_eq!(freq, Some(2)); } #[test] fn test_sim() { let mut ngindex = NgFreqIndex::new(3); ngindex.insert("freund"); ngindex.insert("hund"); ngindex.insert("kunde"); ngindex.insert("bund"); let kund = ngindex.build_vec("kund"); let kunde = ngindex.build_vec("kunde"); let hund = ngindex.build_vec("hund"); let sim_kund_kunde = term_dist(&kund, &kunde); let sim_kund_hund = term_dist(&kund, &hund); assert!(sim_kund_kunde > sim_kund_hund); } } ================================================ FILE: lib/indexes/src/radical.rs ================================================ use std::collections::HashMap; use bktree::BkTree; use serde::{Deserialize, Serialize}; use types::jotoba::kanji::radical::SearchRadicalInfo; /// Radicals indexed by its meanings #[derive(Serialize, Deserialize)] pub struct RadicalIndex { pub meaning_map: HashMap>, pub term_tree: BkTree, } impl RadicalIndex { pub fn new( meaning_map: HashMap>, term_tree: BkTree, ) -> Self { Self { meaning_map, term_tree, } } /// Returns `true` if the 
index contains `term` #[inline(always)] pub fn has_term(&self, term: &str) -> bool { self.meaning_map.contains_key(term) } /// Returns `SearchRadicalInfo` from the index by its term or `None` if term is not found #[inline(always)] pub fn get(&self, term: &str) -> Option<&Vec> { self.meaning_map.get(term) } } ================================================ FILE: lib/indexes/src/regex.rs ================================================ use serde::{Deserialize, Serialize}; use std::collections::{hash_map::Iter, HashMap, HashSet}; /// Index to allow fast and efficient regex search queries. #[derive(Serialize, Deserialize)] pub struct RegexSearchIndex { data: HashMap>, } impl RegexSearchIndex { /// Creates a new empty Index #[inline] pub fn new() -> Self { RegexSearchIndex { data: HashMap::new(), } } /// Returns an iterator over all items in the index #[inline] pub fn iter(&self) -> Iter> { self.data.iter() } /// Returns a HashSet with all words (seq_ids) that contain the given character #[inline(always)] pub fn get_words_with(&self, character: char) -> Option<&HashSet> { self.data.get(&character) } /// Adds a new term to the index #[inline] pub fn add_term(&mut self, term: &str, seq_id: u32) { for c in term.chars() { self.data.entry(c).or_default().insert(seq_id); } } } ================================================ FILE: lib/indexes/src/sentences.rs ================================================ use vsm::presets::VSMIndexSimple; // Shortcut for type of index pub type NativeIndex = VSMIndexSimple; pub type ForeignIndex = VSMIndexSimple; ================================================ FILE: lib/indexes/src/storage/kanji.rs ================================================ use crate::kanji::reading_freq::FrequencyIndex; use std::{error::Error, fs::File, io::BufReader, path::Path}; pub const K_READINGS_FREQ_FILE: &str = "kreading_freq_index"; /// Store for name indexes pub struct KanjiStore { kread_frequency: FrequencyIndex, } impl KanjiStore { pub fn 
new(kread_frequency: FrequencyIndex) -> Self {
        Self { kread_frequency }
    }

    /// Returns the kanji reading frequency index.
    #[inline(always)]
    pub fn reading_freq(&self) -> &FrequencyIndex {
        &self.kread_frequency
    }
}

/// Loads the kanji store by deserializing the reading-frequency index from `path`.
pub(crate) fn load<P: AsRef<Path>>(path: P) -> Result<KanjiStore, Box<dyn Error>> {
    let kread_file = Path::new(path.as_ref()).join(K_READINGS_FREQ_FILE);
    let kread_frequency: FrequencyIndex =
        bincode::deserialize_from(BufReader::new(File::open(kread_file)?))?;
    Ok(KanjiStore::new(kread_frequency))
}

================================================
FILE: lib/indexes/src/storage/mod.rs
================================================
pub mod kanji;
pub mod name;
pub mod radical;
pub mod sentence;
pub mod suggestions;
pub(crate) mod utils;
pub mod word;

use once_cell::sync::OnceCell;
use std::{error::Error, path::Path};
use {
    kanji::KanjiStore, name::NameStore, radical::RadicalStore, sentence::SentenceStore,
    word::WordStore,
};

/// In-memory store for all indexes
pub(crate) static INDEX_STORE: OnceCell<IndexStore> = OnceCell::new();

/// Store for all indexes
pub struct IndexStore {
    word: WordStore,
    sentence: SentenceStore,
    name: NameStore,
    radical: RadicalStore,
    kanji: KanjiStore,
}

impl IndexStore {
    #[inline(always)]
    pub fn word(&self) -> &WordStore {
        &self.word
    }

    #[inline(always)]
    pub fn sentence(&self) -> &SentenceStore {
        &self.sentence
    }

    #[inline(always)]
    pub fn name(&self) -> &NameStore {
        &self.name
    }

    #[inline(always)]
    pub fn radical(&self) -> &RadicalStore {
        &self.radical
    }

    #[inline(always)]
    pub fn kanji(&self) -> &KanjiStore {
        &self.kanji
    }

    /// Returns `true` if all indexes are properly loaded.
    /// NOTE(review): the kanji store is not validated here — intentional?
    pub fn check(&self) -> bool {
        self.word.check() && self.sentence.check() && self.name.check() && self.radical.check()
    }
}

/// Returns an IndexStore which can be used to retrieve all indexes.
///
/// # Safety
/// Callers must ensure [`load`] completed successfully beforehand;
/// `get_unchecked` on an unset `OnceCell` is undefined behavior.
#[inline(always)]
pub fn get() -> &'static IndexStore {
    unsafe { INDEX_STORE.get_unchecked() }
}

/// Loads all indexes. Returns `Ok(false)` if an index failed its consistency
/// check, `Ok(true)` if everything was (or already is) loaded.
pub fn load<P: AsRef<Path>>(index_folder: P) -> Result<bool, Box<dyn Error>> {
    if is_loaded() {
        return Ok(true);
    }

    let store = load_raw(index_folder)?;
    if !store.check() {
        return Ok(false);
    }

    INDEX_STORE.set(store).ok();
    Ok(true)
}

/// Returns `true` if the global index store has been initialized.
pub fn is_loaded() -> bool {
    INDEX_STORE.get().is_some()
}

/// Needed for tests only
pub fn wait() {
    INDEX_STORE.wait();
}

/// Loads all indexes from `index_folder` without touching the global store.
pub fn load_raw<P: AsRef<Path>>(index_folder: P) -> Result<IndexStore, Box<dyn Error>> {
    log::debug!("Loading word index");
    let word = word::load(index_folder.as_ref())?;

    log::debug!("Loading sentence index");
    let sentence = sentence::load(index_folder.as_ref())?;

    log::debug!("Loading name index");
    let name = name::load(index_folder.as_ref())?;

    log::debug!("Loading radical index");
    let radical = radical::load(index_folder.as_ref())?;

    log::debug!("Loading kanji reading frequency index");
    let kanji = kanji::load(index_folder.as_ref())?;

    Ok(IndexStore {
        word,
        sentence,
        name,
        radical,
        kanji,
    })
}

================================================
FILE: lib/indexes/src/storage/name.rs
================================================
use super::utils;
use crate::names::{ForeignIndex, NativeIndex};
use std::{error::Error, path::Path};

pub const FOREIGN_FILE: &str = "name_foreign_index";
pub const NATIVE_FILE: &str = "name_jp_index";

/// Store for name indexes
pub struct NameStore {
    foreign: ForeignIndex,
    native: NativeIndex,
}

impl NameStore {
    pub(crate) fn new(foreign: ForeignIndex, native: NativeIndex) -> Self {
        Self { foreign, native }
    }

    #[inline(always)]
    pub fn foreign(&self) -> &ForeignIndex {
        &self.foreign
    }

    #[inline(always)]
    pub fn native(&self) -> &NativeIndex {
        &self.native
    }

    // NOTE(review): always returns `true` — no validation is performed for names.
    pub(crate) fn check(&self) -> bool {
        true
    }
}

/// Loads both name indexes from `path`.
pub(crate) fn load<P: AsRef<Path>>(path: P) -> Result<NameStore, Box<dyn Error>> {
    let foreign = utils::deser_file(path.as_ref(), FOREIGN_FILE)?;
    let native = utils::deser_file(path.as_ref(), NATIVE_FILE)?;
    Ok(NameStore::new(foreign, native))
}

================================================
FILE: lib/indexes/src/storage/radical.rs
================================================
use super::utils;
use crate::radical::RadicalIndex;
use std::{error::Error, path::Path};

pub const RAD_INDEX_FILE: &str = "radical_index";

/// Store for radical indexes
pub struct
RadicalStore {
    rad_index: RadicalIndex,
}

impl RadicalStore {
    pub(crate) fn new(rad_index: RadicalIndex) -> Self {
        Self { rad_index }
    }

    /// Returns the meaning index for radicals
    #[inline]
    pub fn meaning_index(&self) -> &RadicalIndex {
        &self.rad_index
    }

    /// Returns true if data is valid
    pub(crate) fn check(&self) -> bool {
        !self.rad_index.meaning_map.is_empty()
    }
}

/// Loads the radical store from `path`.
pub(crate) fn load<P: AsRef<Path>>(path: P) -> Result<RadicalStore, Box<dyn Error>> {
    let index = utils::deser_file(path, RAD_INDEX_FILE)?;
    let store = RadicalStore::new(index);
    Ok(store)
}

================================================
FILE: lib/indexes/src/storage/sentence.rs
================================================
use super::utils;
use crate::sentences::{ForeignIndex, NativeIndex};
use std::{error::Error, path::Path};

pub const NATIVE_FILE: &str = "sentences_jp_index";
pub const FOREIGN_FILE: &str = "sentences_fg_index";

/// Store for sentence indexes
pub struct SentenceStore {
    native: NativeIndex,
    foreign: ForeignIndex,
}

impl SentenceStore {
    pub(crate) fn new(native: NativeIndex, foreign: ForeignIndex) -> Self {
        Self { foreign, native }
    }

    /// Returns the foreign index for the given language or `None` if not loaded
    #[inline(always)]
    pub fn foreign(&self) -> &ForeignIndex {
        &self.foreign
    }

    /// Returns the japanese sentence index
    #[inline(always)]
    pub fn native(&self) -> &NativeIndex {
        &self.native
    }

    // NOTE(review): always returns `true` — no validation is performed for sentences.
    pub(crate) fn check(&self) -> bool {
        true
    }
}

/// Loads both sentence indexes from `path`.
pub(crate) fn load<P: AsRef<Path>>(path: P) -> Result<SentenceStore, Box<dyn Error>> {
    let native = utils::deser_file(path.as_ref(), NATIVE_FILE)?;
    let foreign = utils::deser_file(path.as_ref(), FOREIGN_FILE)?;
    Ok(SentenceStore::new(native, foreign))
}

================================================
FILE: lib/indexes/src/storage/suggestions.rs
================================================
use crate::hashtag::HashTagIndex;

use super::utils;
use autocompletion::index::{basic::BasicIndex, japanese::JapaneseIndex};
use once_cell::sync::OnceCell;
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, error::Error, path::Path};
use types::jotoba::language::Language;

pub const K_MEANING_NGRAM: usize = 3;
pub const FG_WORDS_NGRAM: usize = 3;
pub const JP_WORDS_NGRAM: usize = 2;
pub const FG_NAMES_NGRAM: usize = 3;
pub const JP_NAMES_NGRAM: usize = 2;

pub const SUGGESTION_FILE: &str = "suggestions";

/// In-memory store for all suggestion indexes
pub(crate) static SUGGESTION_STORE: OnceCell<SuggestionStorage> = OnceCell::new();

/// Contains all suggestion index data
#[derive(Serialize, Deserialize)]
pub struct SuggestionStorage {
    jp_words: JapaneseIndex,
    foreign_words: HashMap<Language, BasicIndex>,
    kanji_meanings: JapaneseIndex,
    names_native: JapaneseIndex,
    names_foreign: BasicIndex,
    hashtag: HashTagIndex,
}

impl SuggestionStorage {
    pub fn new(
        jp_words: JapaneseIndex,
        foreign_words: HashMap<Language, BasicIndex>,
        kanji_meanings: JapaneseIndex,
        names_native: JapaneseIndex,
        names_foreign: BasicIndex,
        hashtag: HashTagIndex,
    ) -> Self {
        Self {
            jp_words,
            foreign_words,
            kanji_meanings,
            names_native,
            names_foreign,
            hashtag,
        }
    }

    #[inline]
    pub fn jp_words(&self) -> &JapaneseIndex {
        &self.jp_words
    }

    #[inline]
    pub fn foreign_words(&self, language: Language) -> Option<&BasicIndex> {
        self.foreign_words.get(&language)
    }

    #[inline]
    pub fn kanji_meanings(&self) -> &JapaneseIndex {
        &self.kanji_meanings
    }

    #[inline]
    pub fn names_native(&self) -> &JapaneseIndex {
        &self.names_native
    }

    #[inline]
    pub fn names_foreign(&self) -> &BasicIndex {
        &self.names_foreign
    }

    #[inline]
    pub fn hashtags(&self) -> &HashTagIndex {
        &self.hashtag
    }

    /// Returns `true` if a foreign word index exists for every supported language.
    pub fn check(&self) -> bool {
        utils::check_lang_map(&self.foreign_words)
    }
}

/// Deserializes the suggestion storage from `file` without touching the global store.
pub fn load_raw<P: AsRef<Path>>(file: P) -> Result<SuggestionStorage, Box<dyn Error>> {
    utils::deser_file(file, "")
}

/// Loads the suggestion storage into the global store.
/// Returns `Ok(false)` if the store was already set.
pub fn load<P: AsRef<Path>>(path: P) -> Result<bool, Box<dyn Error>> {
    let store = load_raw(path)?;
    Ok(SUGGESTION_STORE.set(store).is_ok())
}

/// Returns the global suggestion storage.
///
/// # Safety
/// Callers must ensure [`load`] completed successfully beforehand;
/// `get_unchecked` on an unset `OnceCell` is undefined behavior.
#[inline]
pub fn get_suggestions() -> &'static SuggestionStorage {
    unsafe { SUGGESTION_STORE.get_unchecked() }
}

================================================
FILE: lib/indexes/src/storage/utils.rs
================================================
use serde::de::DeserializeOwned;
use
std::{ collections::HashMap, error::Error, fs::File, io::{BufReader, Read}, path::Path, }; use types::jotoba::language::Language; /// Deserializes a file from `path` with `name` pub fn deser_file>( path: P, name: &str, ) -> Result> { let path = if name.is_empty() { path.as_ref().to_path_buf() } else { path.as_ref().join(name) }; Ok(fast_deser(path)?) } pub fn load_by_language>( path: P, prefix: &str, load: F, ) -> Result, Box> where F: Fn(&Path) -> Result, Box>, { let mut map = HashMap::with_capacity(10); // All index files in index source folder let files = std::fs::read_dir(path)?.map(|res| res.map(|e| e.path())); for file in files { let file = file?; let file_name = file.file_name().and_then(|i| i.to_str()).unwrap(); if !file_name.starts_with(prefix) { continue; } match load(file.as_ref())? { Some((lang, deser)) => { map.insert(lang, deser); } None => (), }; } Ok(map) } /* pub fn lang_from_file>(file: F, prefix: &str) -> Option { let file_name = file.as_ref().file_name()?.to_str()?.to_string(); let lang_str = file_name.strip_prefix(prefix).unwrap(); Language::from_str(lang_str).ok() } */ /// Returns true if `map` has an entry for all language keys pub fn check_lang_map(map: &HashMap) -> bool { Language::iter_word().all(|i| map.contains_key(&i)) } // A bit faster. Who cares about memory consumption anyways fn fast_deser>( file_path: P, ) -> Result> { let file = File::open(file_path)?; let len = file.metadata()?.len(); let mut buf = vec![0u8; len as usize]; let mut reader = BufReader::new(file); reader.read_exact(&mut buf)?; Ok(bincode::deserialize(&buf)?) 
} ================================================ FILE: lib/indexes/src/storage/word.rs ================================================ use super::utils; use crate::{ kanji, regex::RegexSearchIndex, words::{ForeignIndex, NativeIndex}, }; use log::debug; use std::{collections::HashMap, error::Error, path::Path, str::FromStr}; use types::jotoba::language::Language; pub const FOREIGN_PREFIX: &str = "word_index_"; pub const NATIVE_FILE: &str = "jp_index"; pub const REGEX_FILE: &str = "regex_index"; pub const KANJI_READING_INDEX: &str = "word_kr_index"; /// Store for words pub struct WordStore { foreign: HashMap, native: NativeIndex, regex: RegexSearchIndex, k_reading: kanji::reading::Index, } impl WordStore { pub(crate) fn new( foreign: HashMap, native: NativeIndex, regex: RegexSearchIndex, k_reading: kanji::reading::Index, ) -> Self { Self { foreign, native, regex, k_reading, } } /// Returns the foreign index for the given language #[inline] pub fn foreign(&self, language: Language) -> Option<&ForeignIndex> { self.foreign.get(&language) } #[inline] pub fn regex(&self) -> &RegexSearchIndex { &self.regex } #[inline] pub fn k_reading(&self) -> &kanji::reading::Index { &self.k_reading } #[inline] pub fn native(&self) -> &NativeIndex { &self.native } pub(crate) fn check(&self) -> bool { utils::check_lang_map(&self.foreign) } } #[cfg(not(feature = "parallel"))] pub(crate) fn load>(path: P) -> Result> { let start = std::time::Instant::now(); let foreign = load_foreign(path.as_ref())?; let native = utils::deser_file(path.as_ref(), NATIVE_FILE)?; let regex = utils::deser_file(path.as_ref(), REGEX_FILE)?; let k_reading = utils::deser_file(path.as_ref(), KANJI_READING_INDEX)?; debug!("Loading indexes sync took: {:?}", start.elapsed()); Ok(WordStore::new(foreign, native, regex, k_reading)) } #[cfg(feature = "parallel")] pub(crate) fn load + Send + Sync>( path: P, ) -> Result> { let start = std::time::Instant::now(); let mut foreign = None; let mut native = None; let mut regex: 
Option<Result<RegexSearchIndex, Box<dyn Error>>> = None;
    let mut k_reading = None;

    rayon::scope(|s| {
        s.spawn(|_| {
            foreign = Some(load_foreign(path.as_ref()));
        });
        s.spawn(|_| {
            native = Some(utils::deser_file(path.as_ref(), NATIVE_FILE));
        });
        s.spawn(|_| {
            regex = Some(utils::deser_file(path.as_ref(), REGEX_FILE));
        });
        s.spawn(|_| {
            k_reading = Some(utils::deser_file(path.as_ref(), KANJI_READING_INDEX));
        });
    });

    // `rayon::scope` guarantees all spawned tasks finished, so every Option is Some.
    let foreign = foreign.unwrap()?;
    let native = native.unwrap()?;
    let regex = regex.unwrap()?;
    let k_reading = k_reading.unwrap()?;

    debug!("Loading indexes parallel took: {:?}", start.elapsed());

    Ok(WordStore::new(foreign, native, regex, k_reading))
}

/// Loads all per-language foreign word indexes from `path`.
fn load_foreign<P: AsRef<Path>>(path: P) -> Result<HashMap<Language, ForeignIndex>, Box<dyn Error>> {
    utils::load_by_language(path, FOREIGN_PREFIX, |p| {
        //let index = ForeignIndex::open(p)?;
        // NOTE(review): the `unwrap()`s below abort on a single corrupt index
        // file or unexpected file name; consider propagating via `?` instead.
        let index: ForeignIndex = utils::deser_file(p, "").unwrap();
        let file_name = p
            .file_name()
            .unwrap()
            .to_str()
            .unwrap()
            .strip_prefix(FOREIGN_PREFIX)
            .unwrap();
        let lang = Language::from_str(file_name).unwrap();
        //let lang = index.get_metadata().language;
        Ok(Some((lang, index)))
    })
}

================================================
FILE: lib/indexes/src/term_freq.rs
================================================
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

/// Index to index frequencies of terms
#[derive(Serialize, Deserialize)]
pub struct TermFreqIndex {
    // Frequency of each term, keyed by its term ID.
    pub(crate) freqs: HashMap<u32, u32>,
    // Term -> term ID mapping.
    pub(crate) t_ids: HashMap<String, u32>,
    // Total amount of insertions (not distinct terms).
    pub(crate) total: usize,
}

impl TermFreqIndex {
    pub fn new() -> Self {
        Self {
            freqs: HashMap::new(),
            t_ids: HashMap::new(),
            total: 0,
        }
    }

    /// Returns the amount of indexed terms
    #[inline]
    pub fn len(&self) -> usize {
        self.freqs.len()
    }

    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Insert a new term into the index or increases the
    /// frequency value of an existing term
    pub fn insert(&mut self, term: String) {
        self.total += 1;

        let freq = self.t_ids.get(&term).and_then(|id| self.freqs.get_mut(&id));
        if let Some(freq) = freq {
            *freq += 1;
            return;
        }

        // BUGFIX: after `compress` shrank `t_ids`, `t_ids.len()` may equal an ID
        // that is still in use by a surviving term; probe upwards to the first
        // free ID instead of silently aliasing two terms onto one ID.
        let mut new_id = self.t_ids.len() as u32;
        while self.freqs.contains_key(&new_id) {
            new_id += 1;
        }
        self.t_ids.insert(term, new_id);
        self.freqs.insert(new_id, 1);
    }

    // Remove all terms with frequency below `threshold` and treat out of dict
    // ngrams as frequency = `1` to reduce memory usage.
    pub fn compress(&mut self, threshold: usize) {
        self.t_ids.retain(|_, id| {
            let freq = *self.freqs.get(id).unwrap();
            if freq < threshold as u32 {
                self.freqs.remove(id).unwrap();
                return false;
            }
            true
        });
    }

    #[inline]
    pub fn vec_builder(&self) -> VecBuilder {
        VecBuilder::new(self)
    }

    #[inline]
    pub fn get_id(&self, term: &str) -> Option<u32> {
        self.t_ids.get(term).copied()
    }

    #[inline]
    pub fn freq(&self, term: &str) -> Option<u32> {
        let id = self.get_id(term)?;
        self.freq_by_id(id)
    }

    #[inline]
    pub fn freq_by_id(&self, id: u32) -> Option<u32> {
        self.freqs.get(&id).copied()
    }

    /// Inverted frequency. Out-of-vocab terms return `None`
    #[inline]
    pub fn inv_freq(&self, term: &str) -> Option<f32> {
        let freq = self.freq(term)? as f32;
        let total = self.total as f32;
        Some((total / freq).log2())
    }

    /// Inverted frequency but out-of-vocab terms are treated as freq=1
    #[inline]
    pub fn inv_freq_oov(&self, term: &str) -> f32 {
        let freq = self.freq(term).unwrap_or(1) as f32;
        let total = self.total as f32;
        (total / freq).log2()
    }
}

/// Helper for building correct term frequency vectors
pub struct VecBuilder<'index> {
    index: &'index TermFreqIndex,
    // IDs assigned to terms unknown to `index`, so repeated lookups agree.
    new_terms: HashMap<String, u32>,
}

impl<'index> VecBuilder<'index> {
    #[inline]
    pub(crate) fn new(index: &'index TermFreqIndex) -> Self {
        Self {
            index,
            new_terms: HashMap::new(),
        }
    }

    /// Retrieves the ID of a term or creates a new one and returns it
    #[inline]
    pub fn get_or_insert_id<S: AsRef<str>>(&mut self, term: S) -> u32 {
        let term = term.as_ref();

        // Try indexed ID
        self.index.t_ids.get(term).copied().unwrap_or_else(|| {
            // Try newly created term ID
            self.new_terms.get(term).copied().unwrap_or_else(|| {
                // Insert new ID
                // NOTE(review): like `insert`, this can collide with surviving
                // IDs after the index was `compress`ed — confirm builders are
                // only used on uncompressed indexes.
                let new_id = (self.new_terms.len() + self.index.t_ids.len()) as u32;
                self.new_terms.insert(term.to_string(), new_id);
                new_id
            })
        })
    }
}
================================================ FILE: lib/indexes/src/words/foreign.rs ================================================ use crate::ng_freq::NgFreqIndex; use serde::{Deserialize, Serialize}; use std::ops::Deref; use vsm::presets::VSMIndexSimple; /// N-gram with for string similarity (NgFreqIndex) index pub type WordVecIndex = VSMIndexSimple; pub const NG_FREQ_N: usize = 3; #[derive(Serialize, Deserialize)] pub struct ForeignIndex { pub vsm_index: WordVecIndex, pub ng_index: NgFreqIndex, } impl ForeignIndex { pub fn new(vsm_index: WordVecIndex, ng_index: NgFreqIndex) -> Self { Self { vsm_index, ng_index, } } #[inline] pub fn vsm_index(&self) -> &WordVecIndex { &self.vsm_index } #[inline] pub fn ng_index(&self) -> &NgFreqIndex { &self.ng_index } } impl Deref for ForeignIndex { type Target = WordVecIndex; #[inline] fn deref(&self) -> &Self::Target { self.vsm_index() } } ================================================ FILE: lib/indexes/src/words/mod.rs ================================================ pub mod foreign; pub mod native; // Shortcut for types of index pub type ForeignIndex = foreign::ForeignIndex; pub type NativeIndex = native::NativeIndex; ================================================ FILE: lib/indexes/src/words/native.rs ================================================ use serde::{Deserialize, Serialize}; use crate::ng_freq::NgFreqIndex; use std::ops::Deref; pub const N: usize = 3; pub type WordVecIndex = ngindex::NgramIndex; /// Japanese word index #[derive(Serialize, Deserialize)] pub struct NativeIndex { /// Japanese Word index pub index: WordVecIndex, /// Ng-Term frequency index pub tf_index: NgFreqIndex, } impl NativeIndex { pub fn new(vsm_index: WordVecIndex, ng_index: NgFreqIndex) -> Self { Self { index: vsm_index, tf_index: ng_index, } } #[inline] pub fn index(&self) -> &WordVecIndex { &self.index } #[inline] pub fn tf_index(&self) -> &NgFreqIndex { &self.tf_index } } impl Deref for NativeIndex { type Target = WordVecIndex; 
#[inline] fn deref(&self) -> &Self::Target { self.index() } } ================================================ FILE: lib/japanese/Cargo.toml ================================================ [package] name = "japanese" version = "0.1.0" authors = ["jojii "] edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] itertools = "0.11.0" # wana_kana = { git = "https://github.com/WeDontPanic/wana_kana_rust" } wana_kana = { git = "https://github.com/PSeitz/wana_kana_rust" } jp_utils = { git = "https://github.com/JojiiOfficial/jp_utils", features = ["furigana"] } once_cell = "*" [dev-dependencies] test-case = "3.1.0" resources = { path = "../resources" } japanese = { path = "../japanese" } ================================================ FILE: lib/japanese/src/furigana/generate/mod.rs ================================================ pub mod traits; pub use traits::ReadingRetrieve; use super::map_readings; use crate::ToKanaExt; use itertools::Itertools; use jp_utils::{ furi::{parse::FuriParser, segment::AsSegment}, reading::ReadingRef, JapaneseExt, }; use std::collections::HashSet; /// Generates furigana readings for the given `kanji` input based on the provided `kana` reading and /// kanji readings which are being passed using `retrieve`. In case a reading can't be correctly /// identified, the full kanji<->kana furigana block is getting returned instead of an error. 
pub fn checked(retrieve: R, kanji: &str, kana: &str) -> String { let unchecked_furi = match unchecked(retrieve, kanji, kana) { Some(u) => u, // None => return single_block(kanji, kana), None => return ReadingRef::new_with_kanji(kana, kanji).encode().into(), }; let furi_parsed = FuriParser::new(&unchecked_furi) .map(|i| i.unwrap().get_kana_reading()) .join(""); // check if built correctly check(&furi_parsed, kana) .then(|| unchecked_furi) // if not correct use one block for all // .unwrap_or_else(|| single_block(kanji, kana)) .unwrap_or_else(|| ReadingRef::new_with_kanji(kana, kanji).encode().into()) } fn check(gen: &str, kana: &str) -> bool { let gen = gen .chars() .filter(|c| !c.is_symbol()) .collect::() .to_hiragana(); let kana = kana .chars() .filter(|c| !c.is_symbol()) .collect::() .to_hiragana(); gen == kana } /// Generates furigana readings for the given `kanji` input based on the provided `kana` reading and /// kanji readings which are being passed using `retrieve` pub fn unchecked(retrieve: R, kanji: &str, kana: &str) -> Option { let kanji_mappings = map_readings(kanji, kana)?; Some(gen_iter(retrieve, kanji, kanji_mappings).join("")) } /// Returns an iterator over all encoded furigana parts pub fn gen_iter<'a, R>( retrieve: R, kanji_text: &'a str, readings: Vec<(String, String)>, ) -> impl Iterator + 'a where R: ReadingRetrieve + 'a, { let mut text_parts = jp_utils::tokenize::by_alphabet(kanji_text, true); let mut furi = readings.into_iter(); std::iter::from_fn(move || { let curr_part = text_parts.next()?; // No need to encode kana parts if !curr_part.is_kanji() { return Some(curr_part.to_string()); } let (kanji, reading) = furi.next()?; if let Some(readings) = assign_readings(&retrieve, &kanji, &reading) { if readings.len() != kanji.chars().count() { // return Some(single_block(kanji, reading)); return Some(ReadingRef::new_with_kanji(&reading, &kanji).encode().into()); } let reading = readings.into_iter().map(|i| i.1).join("|"); // return 
Some(single_block(kanji, reading)); return Some(ReadingRef::new_with_kanji(&reading, &kanji).encode().into()); } // Some(single_block(kanji, reading)) return Some(ReadingRef::new_with_kanji(&reading, &kanji).encode().into()); }) } /// Takes a kanji(compound) and the assigned kana reading and returns (hopefully) a list of the /// provided kanji with the pub fn assign_readings( retrieve: R, kanji: &str, kana: &str, ) -> Option> { let kanji_len = kanji.real_len(); let kana_len = kana.real_len(); // If both have len of 2 the readings are obv if kanji_len == kana_len { return Some( kanji .chars() .zip(kana.chars()) .map(|(kanji, kana)| (kanji.to_string(), kana.to_string())) .collect(), ); } let kanji_lits = get_kanji_literals(kanji); if kanji_lits.len() == 1 { return Some(vec![(kanji.to_owned(), kana.to_owned())]); } let kanji_readings = kanji_lits .iter() .map(|i| (*i, format_readings(retrieve.all(*i)))) .collect::>(); if kanji_readings.is_empty() { return None; } find_kanji_combo(kanji_readings, kana) } /// Find the exact readings of a kanji literal within a kanji compound fn find_kanji_combo( readings_map: Vec<(char, HashSet)>, kana: &str, ) -> Option> { let mut routes: Vec<(usize, Vec, &str)> = vec![(0, vec![], kana)]; for (pos, (_, readings)) in readings_map.iter().enumerate() { let route_pos = pos + 1; let last_routes = routes .clone() .into_iter() .filter(|i| i.0 == pos) .collect_vec(); if last_routes.is_empty() { return None; } for route in last_routes.iter() { let pref = find_prefix(&readings, route.2); for pref in pref { let mut curr_route_readings = route.1.clone(); curr_route_readings.push(pref.clone()); let new_route = ( route_pos, curr_route_readings, &route.2[pref.bytes().len()..], ); routes.push(new_route); } } } let valid_routes = routes .iter() .filter(|i| i.2.is_empty()) .cloned() .collect_vec(); let valid_routes = if valid_routes.is_empty() && !routes.is_empty() { let lasti = routes.last().as_ref().unwrap().2.to_owned(); let mut last = 
routes.last().unwrap().to_owned(); let last_count = routes .iter() .filter(|i| i.0 + 1 == readings_map.len()) .count(); // If only one last kanji reading is missing, just apply the kana char if last.1.len() + 1 == readings_map.len() && last_count == 1 { last.1.push(lasti); // Check if this is really the same as the kana reading if last.1.clone().join("") == kana { vec![last] } else { valid_routes } } else { valid_routes } } else { valid_routes }; // No or multiple routes found should be treated as invalid if valid_routes.is_empty() || valid_routes.len() > 1 { return None; } let route = valid_routes[0].1.clone(); Some( readings_map .into_iter() .map(|i| i.0.to_string()) .zip(route) .collect_vec(), ) } fn get_kanji_literals(inp: &str) -> Vec { inp.chars().filter(|i| i.is_kanji()).collect() } fn find_prefix(prefixe: &HashSet, text: &str) -> Vec { prefixe .iter() .filter(|i| text.to_hiragana().starts_with(&i.to_hiragana())) .cloned() .collect_vec() } fn format_readings(r: Vec) -> HashSet { r.into_iter() .map(|i| i.replace("-", "")) .map(|i| { if i.contains('.') { // On kun readigs, replace everything after the '.' 
let fmt1 = i.split('.').next().unwrap().to_owned().to_hiragana(); let fmt2 = i.replace('.', "").to_hiragana(); vec![fmt1, fmt2] } else { vec![i.to_hiragana()] } }) .flatten() .collect() } ================================================ FILE: lib/japanese/src/furigana/generate/traits.rs ================================================ pub trait ReadingRetrieve { fn onyomi(&self, lit: char) -> Vec; fn kunyomi(&self, lit: char) -> Vec; fn all(&self, lit: char) -> Vec { self.kunyomi(lit) .into_iter() .chain(self.onyomi(lit).into_iter()) .collect() } } impl ReadingRetrieve for &T { fn onyomi(&self, lit: char) -> Vec { (*self).onyomi(lit) } fn kunyomi(&self, lit: char) -> Vec { (*self).kunyomi(lit) } } ================================================ FILE: lib/japanese/src/furigana/mod.rs ================================================ pub mod generate; mod tests; use itertools::Itertools; use jp_utils::JapaneseExt; use crate::ToKanaExt; /// Generates all kanji readins from a kanji and kana string an returns them (kanji, kana) fn map_readings(kanji: &str, kana: &str) -> Option> { let kana = kana.chars().filter(|s| !s.is_symbol()).collect::>(); let mut kana_pos = strip_until_kanji(kanji.chars()); let mut kanji_iter = kanji.chars().filter(|i| !i.is_symbol()).skip(kana_pos); let mut result: Vec<(String, String)> = Vec::new(); let mut curr_kanji = Vec::new(); loop { if kana_pos >= kana.len() { break; } // Kana from current position to end let curr_kana = &kana[kana_pos..]; let kk = kanji_iter.clone().collect_vec(); // Get all chars until next kanji let (part_kana, part_kanji) = to_next_kanji(&mut kanji_iter); // If last part is kanji only take rest of kana reading if part_kana.is_empty() { result.push((part_kanji.iter().collect(), curr_kana.iter().collect())); break; } // Current kanji buff curr_kanji.clear(); let mut counter = 1; let found = loop { if kana_pos >= kana.len() { break false; } curr_kanji.push(kana[kana_pos]); kana_pos += 1; // Require at least as much kana 
characters as kanji characters if counter < part_kanji.len() { counter += 1; continue; } if starts_with( curr_kana, &curr_kanji, &part_kana, !has_kanji_after(&kk, part_kanji.len() + part_kana.len()), ) { break true; } if curr_kanji.len() >= curr_kana.len() || kana_pos >= kana.len() { break false; } counter += 1; }; if !found { // Error return None; } result.push(( char_arr_to_string(&part_kanji), char_arr_to_string(&curr_kanji), )); for _ in 0..(part_kana.len() + part_kanji.len()) { kanji_iter.next(); } kana_pos += part_kana.len(); } Some(result) } /// Returns true if there are kanji elements within arr after the given offset fn has_kanji_after(arr: &[T], offset: usize) -> bool where T: JapaneseExt, { if offset >= arr.len() { return false; } arr[offset..] .iter() .any(|i| i.is_kanji() || i.is_roman_letter()) } /// Checks whether 'arr' starts with a*b or not fn starts_with(arr: &[T], a: &[T], b: &[T], last: bool) -> bool where T: PartialEq + ToKanaExt + JapaneseExt, { if last { if a.len() + b.len() != arr.len() { return false; } } else if a.len() + b.len() > arr.len() { return false; } for (pos, item) in a.iter().enumerate() { if arr[pos].to_hiragana() != *item.to_hiragana() { return false; } } for (pos, item) in b.iter().enumerate() { if arr[pos + a.len()].to_hiragana() != *item.to_hiragana() { return false; } } true } /// Helper method to collect all items in a /// Vec into a newly allocated String #[inline] fn char_arr_to_string(vec: &[char]) -> String { vec.iter().collect() } /// Returns all Kanji and kana elements until a new kanji(compound) is reached fn to_next_kanji(kanji_iter: &mut T) -> (Vec, Vec) where T: Iterator + Clone, { let mut kanji_iter = kanji_iter.clone(); let kanji = kanji_iter .take_while_ref(|i| i.is_kanji() || i.is_symbol() || i.is_roman_letter()) .collect::>(); let kana = kanji_iter .take_while_ref(|i| i.is_kana()) .collect::>(); (kana, kanji) } /// Truncates everything from a kanji_iterator until a kanji element has reached and returns the 
#[cfg(test)]
mod tests {
    use crate::furigana::map_readings;
    //use resources::LAZY_STORAGE;
    use test_case::test_case;

    #[test_case("", "", &[]; "Empty")]
    //#[test_case("音楽が好き", "おんがくがすき", &[("音楽","おんがく"),("好","す")]; "Simple 1")] // TODO: fix this one lol
    #[test_case("音楽は好き", "おんがくはすき", &[("音楽","おんがく"),("好","す")]; "Simple 1")]
    #[test_case("お金を払いたくない", "おかねをはらいたくない", &[("金","かね"),("払","はら")]; "Simple 2")]
    #[test_case("おかねをはらいたくない", "おかねをはらいたくない", &[]; "Kana only")]
    #[test_case("漢字", "かんじ", &[("漢字","かんじ")]; "Kanji only")]
    #[test_case("水気","みずけ",&[("水気","みずけ")]; "Kanji only 2")]
    #[test_case("いつも眠い感じがします", "いつもねむいかんじがします", &[("眠","ねむ"),("感","かん")]; "Simple 3")]
    #[test_case("今日もとても眠い", "きょうもとてもねむい", &[("今日","きょう"),("眠","ねむ")]; "Simple 4")]
    #[test_case("5日", "いつか", &[("5日","いつか")]; "With roman letter")]
    #[test_case("かば、夕べに","かばゆうべに",&[("夕","ゆう")]; "Special char")]
    fn test_map_readings(kanji: &str, kana: &str, expected: &[(&str, &str)]) {
        // Compare as borrowed pairs so the expected values can stay literals.
        let parsed = map_readings(kanji, kana).unwrap();
        let parsed = parsed
            .iter()
            .map(|i| (i.0.as_str(), i.1.as_str()))
            .collect::<Vec<_>>();
        assert_eq!(parsed, expected);
    }
}
true); test("atarashiibunwokangaenai", true); test("atarashiibunwokangaenakute", true); test("shinjitai", true); test("ongaku", true); test("sore wa ongaku desu yo", true); test("kirishima", true); test("deine oma", true); test("kyotou", true); test("onsen", true); test("onsei", true); test("otagai", true); test("kansei", true); test("kanpeki", true); test("fuben", true); test("kansetsu", true); test("chokusetsu", true); } #[test] fn test_false() { test("kind", false); test("jinjc", false); test("gx", false); test("kochen macht spaß", false); test("kinderarbeit", false); test("hausaufgaben sind toll", false); test("I can't think of proper sentences lol", false); test("Mir fallen keine weiteren sätze ein lol", false); test("this is a laptop", false); } fn test(inp: &str, assert: bool) { if could_be_romaji(inp) != assert { panic!("{:?} should be {}", inp, assert); } } } /// Returns `true` if input could be romanized japanese text /// /// Example: "sore wa ongaku desu yo" -> true /// Example: "this is ugly" -> false pub fn could_be_romaji(inp: &str) -> bool { is_romaji_repl(inp).is_some() } pub fn is_romaji_repl(inp: &str) -> Option { let mut inp = inp.to_string(); let to_replace = &['.', '(', ')', '、', '。', '「', '」', ' ', '\'', '"']; for to_repl in to_replace { inp = inp.replace(*to_repl, ""); } inp.to_hiragana().is_japanese().then(|| inp) } ================================================ FILE: lib/japanese/src/lib.rs ================================================ pub mod furigana; pub mod guessing; pub mod radicals; pub trait ToKanaExt { fn to_hiragana(&self) -> String; fn to_katakana(&self) -> String; } impl ToKanaExt for char { #[inline] fn to_hiragana(&self) -> String { wana_kana::ConvertJapanese::to_hiragana(self.to_string().as_ref()) } #[inline] fn to_katakana(&self) -> String { wana_kana::ConvertJapanese::to_katakana(self.to_string().as_ref()) } } impl ToKanaExt for String { #[inline] fn to_hiragana(&self) -> String { 
wana_kana::ConvertJapanese::to_hiragana(self.as_ref()) } #[inline] fn to_katakana(&self) -> String { wana_kana::ConvertJapanese::to_katakana(self.as_ref()) } } impl ToKanaExt for &str { #[inline] fn to_hiragana(&self) -> String { wana_kana::ConvertJapanese::to_hiragana(self.as_ref()) } #[inline] fn to_katakana(&self) -> String { wana_kana::ConvertJapanese::to_katakana(self.as_ref()) } } pub fn to_kk_fmt(inp: &str) -> String { let inp = inp.to_lowercase(); let i = inp.replace("nn", "ン"); wana_kana::ConvertJapanese::to_katakana(i.as_str()) } pub fn to_hira_fmt(inp: &str) -> String { let inp = inp.to_lowercase(); let i = inp.replace("nn", "ん"); wana_kana::ConvertJapanese::to_hiragana(i.as_str()) } /// Returns `true` if `romaji` is a prefix of `hira` where romaji is romaji text and `hira` is text written in hiragana #[inline] pub fn romaji_prefix(romaji: &str, hira: &str) -> bool { wana_kana::ConvertJapanese::to_romaji(hira) .to_lowercase() .starts_with(&romaji.to_lowercase()) } ================================================ FILE: lib/japanese/src/radicals.rs ================================================ use once_cell::sync::Lazy; use std::collections::HashMap; /// Maps radicals to their stroke counts. 
static RAD_STROKE_MAP: Lazy> = Lazy::new(|| { let mut map: HashMap = HashMap::default(); for rads in RADICALS.iter() { for rad in rads.1 { map.insert(rad.chars().next().unwrap(), rads.0); } } map }); pub const RADICALS: &[(u32, &[&str]); 15] = &[ (1, &["一", "|", "丶", "ノ", "乙", "亅"]), ( 2, &[ "二", "亠", "人", "⺅", "𠆢", "儿", "入", "ハ", "丷", "冂", "冖", "冫", "几", "凵", "刀", "⺉", "力", "勹", "匕", "匚", "十", "卜", "卩", "厂", "厶", "又", "マ", "九", "ユ", "乃", "𠂉", ], ), ( 3, &[ "⻌", "口", "囗", "土", "士", "夂", "夕", "大", "女", "子", "宀", "寸", "小", "⺌", "尢", "尸", "屮", "山", "川", "巛", "工", "已", "巾", "干", "幺", "广", "廴", "廾", "弋", "弓", "ヨ", "彑", "彡", "彳", "⺖", "⺘", "⺡", "⺨", "⺾", "⻏", "⻖", "也", "亡", "及", "久", ], ), ( 4, &[ "⺹", "心", "戈", "戸", "手", "支", "攵", "文", "斗", "斤", "方", "无", "日", "曰", "月", "木", "欠", "止", "歹", "殳", "比", "毛", "氏", "气", "水", "火", "⺣", "爪", "父", "爻", "爿", "片", "牛", "犬", "⺭", "王", "元", "井", "勿", "尤", "五", "屯", "巴", "毋", ], ), ( 5, &[ "玄", "瓦", "甘", "生", "用", "田", "疋", "疒", "癶", "白", "皮", "皿", "目", "矛", "矢", "石", "示", "禸", "禾", "穴", "立", "⻂", "世", "巨", "冊", "母", "⺲", "牙", ], ), ( 6, &[ "瓜", "竹", "米", "糸", "缶", "羊", "羽", "而", "耒", "耳", "聿", "肉", "自", "至", "臼", "舌", "舟", "艮", "色", "虍", "虫", "血", "行", "衣", "西", ], ), ( 7, &[ "臣", "見", "角", "言", "谷", "豆", "豕", "豸", "貝", "赤", "走", "足", "身", "車", "辛", "辰", "酉", "釆", "里", "舛", "麦", ], ), ( 8, &[ "金", "長", "門", "隶", "隹", "雨", "青", "非", "奄", "岡", "免", "斉", ], ), ( 9, &[ "面", "革", "韭", "音", "頁", "風", "飛", "食", "首", "香", "品", ], ), ( 10, &["馬", "骨", "高", "髟", "鬥", "鬯", "鬲", "鬼", "竜", "韋"], ), (11, &["魚", "鳥", "鹵", "鹿", "麻", "亀", "啇", "黄", "黒"]), (12, &["黍", "黹", "無", "歯"]), (13, &["黽", "鼎", "鼓", "鼠"]), (14, &["鼻", "齊"]), (17, &["龠"]), ]; /// Returns true if `lit` is a radical #[inline] pub fn is_radical(lit: char) -> bool { RADICALS .iter() .any(|i| i.1.iter().any(|j| j.chars().next().unwrap() == lit)) } /// Returns a radical literal with its stroke count if found #[inline] pub fn get_stroke_count(lit: char) -> Option { 
RAD_STROKE_MAP.get(&lit).copied() } ================================================ FILE: lib/localization/Cargo.toml ================================================ [package] name = "localization" version = "0.1.0" authors = ["jojii "] edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] dyn-fmt = "0.3.0" gettext = "0.4.0" log = "0.4.19" strum = "0.25.0" strum_macros = "0.25.1" ================================================ FILE: lib/localization/src/error.rs ================================================ #[derive(Debug)] pub enum Error { Gettext(gettext::Error), Io(std::io::Error), DefaultNotFound, } impl From for Error { fn from(err: gettext::Error) -> Self { Self::Gettext(err) } } impl From for Error { fn from(err: std::io::Error) -> Self { Self::Io(err) } } ================================================ FILE: lib/localization/src/language.rs ================================================ use strum_macros::{AsRefStr, Display, EnumString}; use crate::traits::Translatable; /// Supported languages for translation #[derive(Copy, Clone, AsRefStr, EnumString, Display, Eq, PartialEq, Hash, Debug)] #[repr(u8)] pub enum Language { #[strum(serialize = "en", serialize = "en-US")] English, #[strum(serialize = "de", serialize = "de-DE")] German, #[strum(serialize = "ru")] Russain, #[strum(serialize = "sp", serialize = "es-ES")] Spanish, #[strum(serialize = "sw", serialize = "sv-SE")] Swedish, #[strum(serialize = "fr", serialize = "fr-FR")] French, #[strum(serialize = "nl", serialize = "nl-NL")] Dutch, #[strum(serialize = "hu")] Hungarian, #[strum(serialize = "sv", serialize = "sl-SL", serialize = "svl")] Slovenian, #[strum(serialize = "jp", serialize = "ja-JP")] Japanese, } impl Default for Language { #[inline] fn default() -> Self { Self::English } } impl Translatable for Language { #[inline] fn get_id(&self) -> &'static str { match self { Language::English => "English", Language::German 
=> "German", Language::Russain => "Russian", Language::Spanish => "Spanish", Language::Swedish => "Swedish", Language::French => "French", Language::Dutch => "Dutch", Language::Hungarian => "Hungarian", Language::Slovenian => "Slovenian", Language::Japanese => "Japanese", } } } ================================================ FILE: lib/localization/src/lib.rs ================================================ pub mod error; pub mod language; pub mod traits; use std::{collections::HashMap, fmt::Display, fs::File, str::FromStr}; use error::Error; use gettext::Catalog; use language::Language; use log::{debug, error}; /// A Dictionary of multiple catalogs assigned to its languages. Requires at least one cataloge /// for the defined [`default_lang`] pub struct TranslationDict { catalogs: HashMap, default_lang: Language, } impl TranslationDict { /// Creates a new [`TranslationDict`] value with the catalogs available in [`path`]. Parses the /// file names based into their representing [`Language`]. pub fn new(path: &str, default_lang: Language) -> Result { let mut catalogs = HashMap::new(); debug!("Loading locales from: {}", path); // Initialize catalogs for file in std::fs::read_dir(path)? { let file = file?; let file_path = file.path(); let stem = file_path.file_stem().unwrap().to_str().unwrap(); // Ignore non .mo files if file_path .extension() .and_then(|ext| ext.to_str()) .and_then(|ext| (ext.ends_with("mo")).then(|| 1)) .is_none() { continue; } if let Ok(language) = Language::from_str(stem) { let catalog = Catalog::parse(File::open(file_path)?)?; catalogs.insert(language, catalog); debug!("Loaded locale: {:?}", language); } else { error!("Unknown language: {}", stem); } } // Check if `default_lang` is included if catalogs.get(&default_lang).is_none() { return Err(Error::DefaultNotFound); } Ok(TranslationDict { catalogs, default_lang, }) } /// Returns the singular translation of `msg_id` from the given catalog /// or `msg_id` itself if a translation does not exist. 
pub fn gettext<'a>(&'a self, msg_id: &'a str, language: Option) -> &'a str { self.get_catalog(language).gettext(msg_id) } /// Returns the plural translation of `msg_id` from the given catalog /// with the correct plural form for the number `n` of objects. /// Returns msg_id if a translation does not exist and `n == 1`, /// msg_id_plural otherwise. pub fn ngettext<'a>( &'a self, msg_id: &'a str, msg_id_plural: &'a str, n: u64, language: Option, ) -> &'a str { self.get_catalog(language) .ngettext(msg_id, msg_id_plural, n) } /// Returns the singular translation of `msg_id` /// in the context `msg_context` /// or `msg_id` itself if a translation does not exist. pub fn pgettext<'a>( &'a self, msg_context: &'a str, msg_id: &'a str, language: Option, ) -> &'a str { self.get_catalog(language).pgettext(msg_context, msg_id) } /// Returns the plural translation of `msg_id` in the context `msg_context` /// with the correct plural form for the number `n` of objects. /// Returns msg_id if a translation does not exist and `n == 1`, /// msg_id_plural otherwise. pub fn npgettext<'a>( &'a self, msg_context: &'a str, msg_id: &'a str, msg_id_plural: &'a str, n: u64, language: Option, ) -> &'a str { self.get_catalog(language) .npgettext(msg_context, msg_id, msg_id_plural, n) } /// Returns the singular translation of `msg_id` from the given catalog /// or `msg_id` itself if a translation does not exist. pub fn gettext_fmt( &self, msg_id: &str, values: &[T], language: Option, ) -> String { format(self.gettext(msg_id, language), values) } /// Returns the plural translation of `msg_id` from the given catalog /// with the correct plural form for the number `n` of objects. /// Returns msg_id if a translation does not exist and `n == 1`, /// msg_id_plural otherwise. 
pub fn ngettext_fmt( &self, msg_id: &str, msg_id_plural: &str, n: u64, values: &[T], language: Option, ) -> String { format(self.ngettext(msg_id, msg_id_plural, n, language), values) } /// Returns the singular translation of `msg_id` /// in the context `msg_context` /// or `msg_id` itself if a translation does not exist. pub fn pgettext_fmt( &self, msg_context: &str, msg_id: &str, values: &[T], language: Option, ) -> String { format(self.pgettext(msg_context, msg_id, language), values) } /// Returns the plural translation of `msg_id` in the context `msg_context` /// with the correct plural form for the number `n` of objects. /// Returns msg_id if a translation does not exist and `n == 1`, /// msg_id_plural otherwise. pub fn npgettext_fmt( &self, msg_context: &str, msg_id: &str, msg_id_plural: &str, n: u64, values: &[T], language: Option, ) -> String { format( self.npgettext(msg_context, msg_id, msg_id_plural, n, language), values, ) } /// Returns the catalog for the given language pub fn get_catalog(&self, language: Option) -> &Catalog { let language = language.unwrap_or_default(); self.catalogs .get(&language) .unwrap_or_else(|| self.get_default_catalog()) } /// Returns the default catalog pub fn get_default_catalog(&self) -> &Catalog { self.catalogs .get(&self.default_lang) .expect("Missing default catalog") } } /// Formats the input with the passed values and returns a newly allocated owned String fn format(inp: &str, values: &[T]) -> String { use dyn_fmt::AsStrFormatExt; let placeholder_count = count_placeholder(inp); if placeholder_count != values.len() { if values.len() == 1 { let first = values[0].clone(); let mut values = values.to_vec(); for _ in 0..placeholder_count - 1 { values.push(first.clone()); } return inp.format(&values); } } inp.format(values) } fn count_placeholder(inp: &str) -> usize { inp.matches("{}").count() } ================================================ FILE: lib/localization/src/traits.rs ================================================ 
use std::fmt::Display; use super::language::Language; use super::TranslationDict; impl Translatable for &'static str { #[inline] fn get_id(&self) -> &'static str { self } } impl TranslatablePlural for &'static str { #[inline] fn get_plural_id(&self) -> &'static str { self } } /// This trait allows any objects after implementation to be translated (in singular) using `dict` pub trait Translatable { /// Has to return a unique MsgID which has to represent a msgid within the po file(s) fn get_id(&self) -> &'static str; /// Returns the singular translation of `msg_id` from the given catalog /// or `msg_id` itself if a translation does not exist. fn gettext<'a>(&self, dict: &'a TranslationDict, language: Option) -> &'a str { dict.gettext(self.get_id(), language) } /// Returns the singular translation of `msg_id` in the context `msg_context` /// or `msg_id` itself if a translation does not exist. fn pgettext<'a>( &self, dict: &'a TranslationDict, context: &'a str, language: Option, ) -> &'a str { dict.pgettext(context, self.get_id(), language) } /// Returns the singular translation of `msg_id` from the given catalog /// or `msg_id` itself if a translation does not exist. fn gettext_fmt<'a, T: Display + Sized + Clone>( &self, dict: &'a TranslationDict, values: &[T], language: Option, ) -> String { dict.gettext_fmt(self.get_id(), values, language) } /// Returns the singular translation of `msg_id` in the context `msg_context` /// or `msg_id` itself if a translation does not exist. 
fn pgettext_fmt( &self, dict: &TranslationDict, context: &str, values: &[T], language: Option, ) -> String { dict.pgettext_fmt(context, self.get_id(), values, language) } /// Like gettext but returns an owned string fn gettext_custom(&self, dict: &TranslationDict, language: Option) -> String { dict.gettext(self.get_id(), language).to_owned() } } /// This trait allows any objects after implementation to be translated (in plural) using `dict` pub trait TranslatablePlural: Translatable { /// Has to return a unique MsgID which has to represent a msgid_plural within the po file(s) fn get_plural_id(&self) -> &'static str; /// Returns the singular translation of `msg_id` from the given catalog /// or `msg_id` itself if a translation does not exist. fn ngettext<'a>( &self, dict: &'a TranslationDict, n: u64, language: Option, ) -> &'a str { dict.ngettext(self.get_id(), self.get_plural_id(), n, language) } /// Returns the singular translation of `msg_id` in the context `msg_context` /// or `msg_id` itself if a translation does not exist. fn npgettext<'a>( &self, dict: &'a TranslationDict, context: &'a str, n: u64, language: Option, ) -> &'a str { dict.npgettext(context, self.get_id(), self.get_plural_id(), n, language) } /// Returns the singular translation of `msg_id` from the given catalog /// or `msg_id` itself if a translation does not exist. fn ngettext_fmt<'a, T: Display + Sized + Clone>( &self, dict: &'a TranslationDict, n: u64, values: &[T], language: Option, ) -> String { dict.ngettext_fmt(self.get_id(), self.get_plural_id(), n, values, language) } /// Returns the singular translation of `msg_id` in the context `msg_context` /// or `msg_id` itself if a translation does not exist. 
fn npgettext_fmt( &self, dict: &TranslationDict, context: &str, n: u64, values: &[T], language: Option, ) -> String { dict.npgettext_fmt( context, self.get_id(), self.get_plural_id(), n, values, language, ) } } ================================================ FILE: lib/news/Cargo.toml ================================================ [package] name = "news" version = "0.1.0" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] utils = { path = "../utils" } itertools = "0.11.0" once_cell = { version = "1.18.0", default-features = false } comrak = { version = "0.18.0", default-features = false } [features] default = [] ================================================ FILE: lib/news/src/lib.rs ================================================ use std::{ path::Path, sync::{Arc, Mutex}, }; use comrak::ComrakOptions; #[cfg(feature = "news_inotify")] use inotify::{EventMask, Inotify, WatchMask}; use itertools::Itertools; use once_cell::sync::Lazy; pub static NEWS_RETRIEVE: Lazy>> = Lazy::new(|| Mutex::new(Arc::new(News::default()))); /// Contains a set of News entries ordered by oldest -> newest #[derive(Default, Debug, Clone)] pub struct News { pub entries: Vec, } #[derive(Default, Debug, Clone)] pub struct NewsEntry { pub id: u32, pub title: String, pub long: String, pub short: String, pub creation_time: u64, pub was_trimmed: bool, } impl News { /// Load news from a folder pub fn init>(path: P) -> Result<(), Box> { let p = path.as_ref().to_str().unwrap().to_string(); let update = |p: &str| { *NEWS_RETRIEVE.lock().unwrap() = Arc::new(Self::load(p).unwrap()); }; update(&p); #[cfg(feature = "news_inotify")] fs_changed_update(p, update); Ok(()) } pub fn load>(path: P) -> Result> { let mut entries: Vec = Vec::new(); for (pos, file) in std::fs::read_dir(path)?.enumerate() { let file = file?; let file_name = file.file_name().to_string_lossy().to_string(); if !file_name.contains(';') { continue; } let mut 
fn_split = file_name.split(';'); let creation_time: u64 = fn_split.next().unwrap().parse()?; let title = fn_split.join(";"); let id = pos as u32; let (short, long) = parse_markdown(file.path())?; entries.push(NewsEntry { id, title, creation_time, was_trimmed: short != long, long, short, }); } entries.sort_by(|a, b| a.creation_time.cmp(&b.creation_time)); let entry_count = entries.len(); // Only load 15 latest news let entries = entries .into_iter() .skip(entry_count.saturating_sub(15)) .collect::>(); Ok(News { entries }) } /// Returns an iterator over last `limit` news elements from old -> newest pub fn last_entries(&self, limit: usize) -> impl Iterator { self.entries .iter() .skip(self.entries.len() - limit.min(self.entries.len())) } /// Returns a news entry by its ID pub fn by_id(&self, id: u32) -> Option<&NewsEntry> { self.entries.iter().find(|i| i.id == id) } } /// Returns a reference to the loaded news entries #[inline] pub fn get() -> Arc { NEWS_RETRIEVE.lock().unwrap().clone() } fn parse_markdown>(file: P) -> Result<(String, String), Box> { let contents = std::fs::read_to_string(file)?; let short_md = shorten_markdown(&contents); let mut md_options = ComrakOptions::default(); md_options.render.unsafe_ = true; md_options.extension.autolink = true; md_options.extension.tasklist = true; md_options.extension.strikethrough = true; let short_html = comrak::markdown_to_html(&short_md, &md_options); let full_html = comrak::markdown_to_html(&contents, &md_options); Ok((short_html, full_html)) } fn shorten_markdown(full: &str) -> String { let line_count = full.split('\n').count().max(1); let conten_len = utils::real_string_len(full); let mut text_iter = full.split('\n').filter(|i| !i.trim().starts_with('#')); let out; if conten_len > 100 { if line_count > 3 { out = text_iter.take(3).join("\n"); } else { out = text_iter.join("\n"); } } else { out = text_iter.join("\n"); } out } ================================================ FILE: lib/resources/Cargo.toml 
/// Build script: embeds the current git commit hash into the binary via the
/// `GIT_HASH` environment variable (consumed with `env!("GIT_HASH")`).
/// Falls back to an empty hash when git is unavailable (e.g. tarball builds).
fn main() {
    let git_hash = Command::new("git")
        .args(["rev-parse", "HEAD"])
        .output()
        .ok()
        .and_then(|output| String::from_utf8(output.stdout).ok())
        // `git rev-parse` terminates its output with a newline; embedding it
        // verbatim would corrupt the `cargo:` directive printed below.
        .map(|hash| hash.trim().to_string())
        .unwrap_or_else(String::new);
    println!("cargo:rustc-env=GIT_HASH={}", git_hash);
}
Safety: // The STORAGE cell gets initialized once at the beginning which is absolutely necessary for // the program to work. It won't be unset so its always safe unsafe { STORAGE.get_unchecked() } } /// Returns `true` if the storage is loaded #[inline(always)] pub fn is_loaded() -> bool { STORAGE.get().is_some() } /// Load the resource storage and returns it pub fn load_raw>(path: P) -> Result> { let mut reader = BufReader::new(File::open(path)?); Ok(bincode::deserialize_from(&mut reader)?) } /// Load the resource storage from a file. Returns `true` if it wasn't loaded before pub fn load>(path: P) -> Result> { if is_loaded() { return Ok(true); } Ok(STORAGE.set(load_raw(path)?).is_ok()) } /// Serializes a ResourceStorage into `output` pub fn store(output: W, storage: &ResourceStorage) -> Result<(), Box> { bincode::serialize_into(output, storage)?; Ok(()) } pub fn set(res_storage: ResourceStorage) { STORAGE.set(res_storage).ok(); } pub fn wait() { STORAGE.wait(); } ================================================ FILE: lib/resources/src/retrieve/kanji.rs ================================================ use ids_parser::IDS; use sorted_intersection::SortedIntersection; use types::jotoba::kanji::{radical::DetailedRadical, Kanji}; use super::super::storage::kanji::KanjiStorage; #[derive(Clone, Copy)] pub struct KanjiRetrieve<'a> { storage: &'a KanjiStorage, } impl<'a> KanjiRetrieve<'a> { #[inline(always)] pub(crate) fn new(storage: &'a KanjiStorage) -> Self { KanjiRetrieve { storage } } /// Get a kanji by its sequence id #[inline] pub fn by_literal(&self, literal: char) -> Option<&'a Kanji> { self.storage.literal_index.get(literal as u32) } /// Returns `true` if the index has the literal #[inline] pub fn has_literal(&self, literal: char) -> bool { self.storage.literal_index.contains_key(literal as u32) } /// Returns all kanji with the given radicals #[inline] pub fn by_radicals(&self, radicals: &[char]) -> Vec<&'a Kanji> { let rad_map = &self.storage.radical_map; let mut 
maps = radicals .iter() .filter_map(|i| rad_map.get(i).map(|i| i.iter())) .collect::>(); if maps.is_empty() { return vec![]; } SortedIntersection::new(&mut maps) .filter_map(|i| self.by_literal(*i)) .collect::>() } /// Returns all kanji with given jlpt level #[inline] pub fn by_jlpt(&self, jlpt: u8) -> Option<&'a Vec> { self.storage.jlpt_data.get(&jlpt) } /// Returns an iterator over all radicals #[inline] pub fn radicals(&self) -> impl Iterator { self.storage.radical_data.iter().map(|i| i.1) } /// Returns a list of kanji taught in given genki_lesson #[inline] pub fn by_genki_lesson(&self, genki_lektion: u8) -> Option<&'a Vec> { self.storage.genki_levels.get(&genki_lektion) } #[inline] pub fn iter(&self) -> impl Iterator { self.storage.literal_index.iter().map(|i| i.1) } #[inline] pub fn all(&self) -> Vec { self.iter().cloned().collect() } #[inline] pub fn ids(&self, kanji_lit: char) -> Option<&'a IDS> { self.storage.ids_index.get(&kanji_lit) } /// Returns the count of kanji #[inline] pub fn count(&self) -> usize { self.storage.literal_index.len() } } impl japanese::furigana::generate::ReadingRetrieve for KanjiRetrieve<'_> { #[inline] fn onyomi(&self, lit: char) -> Vec { self.by_literal(lit) .map(|i| i.onyomi.clone()) .unwrap_or_default() } #[inline] fn kunyomi(&self, lit: char) -> Vec { self.by_literal(lit) .map(|i| i.kunyomi.clone()) .unwrap_or_default() } } ================================================ FILE: lib/resources/src/retrieve/mod.rs ================================================ pub mod kanji; pub mod name; pub mod sentence; pub mod word; ================================================ FILE: lib/resources/src/retrieve/name.rs ================================================ use super::super::storage::name::NameStorage; use types::jotoba::names::Name; #[derive(Clone, Copy)] pub struct NameRetrieve<'a> { storage: &'a NameStorage, } impl<'a> NameRetrieve<'a> { #[inline(always)] pub(crate) fn new(storage: &'a NameStorage) -> Self { NameRetrieve { 
storage } } /// Get a name by its sequence id #[inline] pub fn by_sequence(&self, seq_id: u32) -> Option<&'a Name> { self.storage.names.get(&seq_id) } /// Returns the amount of names #[inline] pub fn count(&self) -> usize { self.storage.names.len() } /// Returns an iterator over all names #[inline] pub fn iter(&self) -> impl Iterator { self.storage.names.iter().map(|i| i.1) } } ================================================ FILE: lib/resources/src/retrieve/sentence.rs ================================================ use super::super::storage::sentence::SentenceStorage; use types::jotoba::sentences::{tag::Tag, Sentence}; #[derive(Clone, Copy)] pub struct SentenceRetrieve<'a> { storage: &'a SentenceStorage, } impl<'a> SentenceRetrieve<'a> { #[inline(always)] pub(crate) fn new(storage: &'a SentenceStorage) -> Self { SentenceRetrieve { storage } } /// Returns a sentence by its id or `None` if no sentence for the given ID exists #[inline] pub fn by_id(&self, id: u32) -> Option<&'a Sentence> { self.storage.sentences.get(id) } /// Returns an iterator over all sentences with given `jlpt` level #[inline] pub fn ids_by_jlpt(&self, jlpt: u8) -> impl Iterator + 'a { self.storage .jlpt_map .get(&jlpt) .into_iter() .flatten() .copied() } /// Returns an iterator over all sentences with given `tag` #[inline] pub fn by_tag<'b>(&'b self, tag: &Tag) -> impl Iterator + 'b { self.storage .tag_map .get(tag) .into_iter() .flatten() .filter_map(move |i| self.by_id(*i)) } /// Returns an iterator over all sentences with given `jlpt` level #[inline] pub fn by_jlpt<'b>(&'b self, jlpt: u8) -> impl Iterator + 'b { self.storage .jlpt_map .get(&jlpt) .into_iter() .flatten() .filter_map(move |i| self.by_id(*i)) } #[inline] pub fn count(&self) -> usize { self.storage.sentences.len() } #[inline] pub fn iter(&self) -> impl Iterator { self.storage.sentences.iter().map(|i| i.1) } } ================================================ FILE: lib/resources/src/retrieve/word.rs 
================================================
use super::super::storage::word::WordStorage;
use types::jotoba::words::{misc::Misc, part_of_speech::PosSimple, Word};

/// Cheap, copyable read-only view over a `WordStorage`.
#[derive(Clone, Copy)]
pub struct WordRetrieve<'a> {
    storage: &'a WordStorage,
}

impl<'a> WordRetrieve<'a> {
    #[inline(always)]
    pub(crate) fn new(storage: &'a WordStorage) -> Self {
        WordRetrieve { storage }
    }

    /// Get a word by its sequence id
    #[inline]
    pub fn by_sequence(&self, seq_id: u32) -> Option<&'a Word> {
        self.storage.words.get(seq_id)
    }

    /// Returns an iterator over all words
    // NOTE(review): the iterator `Item` types in this impl were lost in extraction and
    // have been reconstructed from `by_sequence`'s return type — confirm against the
    // original source.
    #[inline]
    pub fn iter(&self) -> impl Iterator<Item = &'a Word> {
        self.storage.words.iter().map(|i| i.1)
    }

    /// Returns an iterator over all katakana words
    pub fn katakana<'b>(&'b self) -> impl Iterator<Item = &'a Word> + 'b + DoubleEndedIterator {
        self.storage
            .katakana_words
            .iter()
            .copied()
            .filter_map(|seq| self.by_sequence(seq))
    }

    /// Returns an iterator over all irregular ichidan words
    pub fn irregular_ichidan<'b>(
        &'b self,
    ) -> impl Iterator<Item = &'a Word> + 'b + DoubleEndedIterator {
        self.storage
            .irregular_ichidan
            .iter()
            .copied()
            .filter_map(|seq| self.by_sequence(seq))
    }

    /// Returns the amount of irregular ichidan words that have been indexed
    #[inline]
    pub fn irregular_ichidan_len(&self) -> usize {
        self.storage.irregular_ichidan.len()
    }

    /// Returns the amount of katakana words that have been indexed
    #[inline]
    pub fn katakana_len(&self) -> usize {
        self.storage.katakana_words.len()
    }

    /// Returns an iterator over all words with given `jlpt` level
    #[inline]
    pub fn by_jlpt<'b>(
        &'b self,
        jlpt: u8,
    ) -> impl Iterator<Item = &'a Word> + 'b + DoubleEndedIterator {
        self.storage
            .jlpt_word_map
            .get(&jlpt)
            .into_iter()
            .flatten()
            .filter_map(move |i| self.by_sequence(*i))
    }

    /// Returns the amount of words indexed for given jlpt level
    #[inline]
    pub fn jlpt_len(&self, jlpt: u8) -> Option<usize> {
        self.storage.jlpt_word_map.get(&jlpt).map(|i| i.len())
    }

    /// Returns an iterator over all words with given `pos`
    // (doc fixed: this is the part-of-speech lookup, the old comment said `misc`)
    #[inline]
    pub fn by_pos_simple<'b>(
        &'b self,
        pos: PosSimple,
    ) -> impl Iterator<Item = &'a Word> + 'b + DoubleEndedIterator {
        self.storage
            .pos_map
            .get(&(pos as u8))
            .into_iter()
            .flatten()
            .filter_map(move |i| self.by_sequence(*i))
    }

    /// Returns the amount of words indexed for `pos`
    #[inline]
    pub fn pos_simple_len(&self, pos: &PosSimple) -> Option<usize> {
        self.storage.pos_map.get(&(*pos as u8)).map(|i| i.len())
    }

    /// Returns an iterator over all words with given `misc`
    #[inline]
    pub fn by_misc<'b>(
        &'b self,
        misc: Misc,
    ) -> impl Iterator<Item = &'a Word> + 'b + DoubleEndedIterator {
        self.storage
            .misc_map
            .get(&(misc as u8))
            .into_iter()
            .flatten()
            .filter_map(move |i| self.by_sequence(*i))
    }

    /// Returns the amount of words indexed for misc
    #[inline]
    pub fn misc_len(&self, misc: &Misc) -> Option<usize> {
        self.storage.misc_map.get(&(*misc as u8)).map(|i| i.len())
    }

    /// Returns the total count of words
    #[inline]
    pub fn count(&self) -> usize {
        self.storage.count()
    }
}

================================================
FILE: lib/resources/src/storage/feature.rs
================================================
use strum::{EnumIter, IntoEnumIterator};

/// All data features a resource storage can provide.
#[derive(Clone, Copy, PartialEq, Eq, Debug, EnumIter)]
pub enum Feature {
    // ----- Basic ones -----
    Words,
    Sentences,
    Names,
    Kanji,
    /// RadicalToKanji
    RadicalKanjiMap,
    /// DetailedRadicals
    RadicalData,

    // ----- Other ------

    // Sentences
    SentenceJLPT,
    SentenceTags,

    // Words
    WordIrregularIchidan,
    WordKatakana,
    WordPitch,
    SentenceAvailable,
    WordJlpt,

    // Kanji
    GenkiTags,
    SimilarKanji,
    KanjiDecompositions,
}

impl Feature {
    /// Returns a list of all existing features
    pub fn all() -> Vec<Feature> {
        Feature::iter().collect()
    }
}

================================================
FILE: lib/resources/src/storage/kanji.rs
================================================
use ids_parser::IDS;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use types::jotoba::kanji::{radical::DetailedRadical, Kanji};

use super::feature::Feature;

/// Storage containing all data related to kanji
#[derive(Serialize, Deserialize, Default, Clone)]
pub struct KanjiStorage {
    /// Index mapping kanji literals to `Kanji` data
pub literal_index: intmap::IntMap, /// Mapping from a radical to a list of kanji using this radical pub radical_map: HashMap>, /// Maps radical literal to its detailed radical data pub radical_data: HashMap, /// Jlpt mapping for kanji pub jlpt_data: HashMap>, // Search tags pub genki_levels: HashMap>, /// IDS index for kanji decomposition graph pub ids_index: HashMap, has_similar_kanji: bool, } impl KanjiStorage { pub fn new() -> Self { Self::default() } /// Insert kanji into the KanjiStorage pub fn insert_kanji(&mut self, kanji: Vec) { self.literal_index.clear(); self.jlpt_data.clear(); for kanji in kanji { if let Some(jlpt) = kanji.jlpt { self.jlpt_data.entry(jlpt).or_default().push(kanji.literal); } if !self.has_similar_kanji && !kanji.similar_kanji.is_empty() { self.has_similar_kanji = true; } self.literal_index.insert(kanji.literal as u32, kanji); } } /// Insert radical detail data pub fn insert_radicals(&mut self, radicals: Vec) { self.radical_data.clear(); for radical in radicals { self.radical_data.insert(radical.literal, radical); } } pub fn get_features(&self) -> Vec { let mut out = vec![]; if !self.literal_index.is_empty() { out.push(Feature::Kanji); } if !self.genki_levels.is_empty() { out.push(Feature::GenkiTags); } if !self.radical_data.is_empty() { out.push(Feature::RadicalData); } if !self.radical_map.is_empty() { out.push(Feature::RadicalKanjiMap); } if self.has_similar_kanji { out.push(Feature::SimilarKanji); } if !self.ids_index.is_empty() { out.push(Feature::KanjiDecompositions); } out } } ================================================ FILE: lib/resources/src/storage/mod.rs ================================================ pub mod feature; pub mod kanji; pub mod name; pub mod sentence; pub mod word; use super::retrieve::{ kanji::KanjiRetrieve, name::NameRetrieve, sentence::SentenceRetrieve, word::WordRetrieve, }; use self::{ feature::Feature, kanji::KanjiStorage, name::NameStorage, sentence::SentenceStorage, word::WordStorage, }; use 
serde::{Deserialize, Serialize}; use strum::IntoEnumIterator; /// Storage holding all data of Jotoba #[derive(Serialize, Deserialize, Default, Clone)] pub struct ResourceStorage { pub words: WordStorage, pub kanji: KanjiStorage, pub names: NameStorage, pub sentences: SentenceStorage, } impl ResourceStorage { /// Create a new empty `ResourceStorage` pub fn new() -> Self { Self::default() } /// Returns `true` if all necessary features are present pub fn check(&self) -> bool { self.missing_but_required().is_empty() } pub fn missing_but_required(&self) -> Vec { let missing = self.missing_features(); let mut out = vec![]; for req_feature in super::REQUIRED_FEATURES { if missing.contains(req_feature) { out.push(*req_feature); } } out } /// Returns a list of features that are missing but required pub fn missing_features(&self) -> Vec { let features = self.get_features(); let mut missing = vec![]; for feature in Feature::iter() { if !features.contains(&feature) { missing.push(feature); } } missing } /// Returns `true` if ResourceStorage has the given feature #[inline] pub fn has_feature(&self, feature: Feature) -> bool { self.get_features().contains(&feature) } /// Returns a list of all features of the ResourceStorage's data pub fn get_features(&self) -> Vec { let mut out = vec![]; out.extend(self.words.get_features()); out.extend(self.kanji.get_features()); out.extend(self.names.get_features()); out.extend(self.sentences.get_features()); out } } // Retrieve functions // `ResourceStorage::check` is supposed to be called at the begininng to ensure // those fields are not unset impl ResourceStorage { /// Get a reference to the resource storage's words. #[inline(always)] pub fn words<'a>(&'a self) -> WordRetrieve<'a> { WordRetrieve::new(&self.words) } /// Get a reference to the resource storage's kanji. #[inline(always)] pub fn kanji(&self) -> KanjiRetrieve { KanjiRetrieve::new(&self.kanji) } /// Get a reference to the resource storage's names. 
#[inline(always)] pub fn names(&self) -> NameRetrieve { NameRetrieve::new(&self.names) } /// Get a reference to the resource storage's sentences. #[inline(always)] pub fn sentences(&self) -> SentenceRetrieve { SentenceRetrieve::new(&self.sentences) } } ================================================ FILE: lib/resources/src/storage/name.rs ================================================ use std::collections::HashMap; use super::feature::Feature; use serde::{Deserialize, Serialize}; use types::jotoba::names::Name; /// Storage containing all data related to names #[derive(Serialize, Deserialize, Default, Clone)] pub struct NameStorage { /// Index mapping name id to its `Name` value pub names: HashMap, } impl NameStorage { pub fn new() -> Self { Self::default() } /// Insert names into the NameStorage pub fn insert_names(&mut self, names: Vec) { self.names.clear(); for name in names { self.names.insert(name.sequence, name); } } pub fn get_features(&self) -> Vec { let mut out = vec![]; if !self.names.is_empty() { out.push(Feature::Names); } out } } ================================================ FILE: lib/resources/src/storage/sentence.rs ================================================ use super::feature::Feature; use intmap::IntMap; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use types::jotoba::sentences::{tag::Tag, Sentence}; /// Storage for sentence related data #[derive(Serialize, Deserialize, Clone, Default)] pub struct SentenceStorage { /// Mapping sentence by its ID pub sentences: IntMap, /// Mappings of tags to sentences with this tag pub tag_map: HashMap>, // Search tags pub jlpt_map: HashMap>, } impl SentenceStorage { pub fn new() -> Self { Self::default() } pub fn get_features(&self) -> Vec { let mut out = vec![]; if !self.sentences.is_empty() { out.push(Feature::Sentences); } if !self.tag_map.is_empty() { out.push(Feature::SentenceTags); } if !self.jlpt_map.is_empty() { out.push(Feature::SentenceJLPT); } out } } 
================================================ FILE: lib/resources/src/storage/word.rs ================================================ use intmap::IntMap; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use types::jotoba::words::Word; use super::feature::Feature; /// Storage containing all data related to words #[derive(Serialize, Deserialize, Default, Clone)] pub struct WordStorage { /// Word index pub words: IntMap, // Search tags pub jlpt_word_map: HashMap>, pub irregular_ichidan: Vec, pub pos_map: HashMap>, pub misc_map: HashMap>, pub katakana_words: Vec, // Feature information has_accents: bool, has_sentence_mapping: bool, has_jlpt: bool, } impl WordStorage { pub fn new() -> Self { Self::default() } /// Returns the amounot of words in the WordStorage #[inline] pub fn count(&self) -> usize { self.words.len() } /// Inserts words into the WordStorage pub fn insert_words(&mut self, words: Vec) { self.clear_words(); for word in words { if let Some(jlpt) = word.get_jlpt_lvl() { self.jlpt_word_map .entry(jlpt) .or_default() .push(word.sequence); self.has_jlpt = true; } if !self.has_accents && word.accents.count() > 0 { self.has_accents = true; } self.words.insert(word.sequence, word); } for (_, v) in self.jlpt_word_map.iter_mut() { v.sort(); } } pub fn update_sentence_mapping(&mut self) { self.has_sentence_mapping = self.words.iter().any(|i| i.1.sentences_available > 0); } pub fn get_features(&self) -> Vec { let mut out = vec![]; if !self.words.is_empty() { out.push(Feature::Words); } if !self.irregular_ichidan.is_empty() { out.push(Feature::WordIrregularIchidan); } if !self.katakana_words.is_empty() { out.push(Feature::WordKatakana); } if self.has_sentence_mapping { out.push(Feature::SentenceAvailable); } if self.has_accents { out.push(Feature::WordPitch); } if self.has_jlpt { out.push(Feature::WordJlpt); } out } fn clear_words(&mut self) { self.words.clear(); self.jlpt_word_map.clear(); self.has_accents = false; self.has_sentence_mapping = 
false; } } ================================================ FILE: lib/search/Cargo.toml ================================================ [package] name = "search" version = "0.1.0" authors = ["jojii "] edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] japanese = { path = "../japanese" } engine = { path = "../engine" } sentence_reader = { path = "../sentence_reader", features = ["jotoba_intern"] } error = { path = "../error" } utils = { path = "../utils" } indexes = { path = "../indexes" } localization = { path = "../localization" } resources = { path = "../resources" } types = { path = "../types", default-features = false, features = [ "jotoba_intern", ] } log = "0.4.19" itertools = "0.11.0" once_cell = { version = "1.18.0", default-features = false } regex = { version = "1.9.1", features = ["std"], default-features = false } percent-encoding = "2.3.0" rayon = "1.7.0" priority_container = { git = "https://github.com/JojiiOfficial/PrioContainer/" } #priority_container = { path = "../../../priority_container" } order_struct = { git = "https://github.com/JojiiOfficial/OrderStruct" } intmap = { git = "https://github.com/JojiiOfficial/rust-intmap" } #ngindex = { path = "../../../ngindex" } ngindex = { git = "https://github.com/JojiiOfficial/ngindex"} #ngindex2 = { path ="../../../ngindex", package = "ngindex" } #index_framework = { path = "../../../index_framework" } index_framework = { git = "https://github.com/WeDontPanic/index_framework" } #vsm = { path = "../../../vsm"} vsm = { git = "https://github.com/JojiiOfficial/VSM"} sparse_vec = { git = "https://github.com/JojiiOfficial/SparseVec"} serde = { version = "1.0.171", features = ["derive"] } bincode = "1.3.3" #ngram-tools = { path = "../../../ngram-tools"} ngram-tools = { git = "https://github.com/JojiiOfficial/ngram-tools"} jp_utils = { git = "https://github.com/JojiiOfficial/jp_utils"} japanese-number-parser = { git = 
"https://github.com/gorbit99/japanese-number-parser"} [dev-dependencies] test-case = "3.1.0" ================================================ FILE: lib/search/src/engine/mod.rs ================================================ pub mod names; pub mod radical; pub mod sentences; pub mod words; ================================================ FILE: lib/search/src/engine/names/foreign.rs ================================================ use index_framework::{ retrieve::{retriever::default::DefaultRetrieve, Retrieve}, traits::{backend::Backend, dictionary::IndexDictionary}, }; use indexes::{names::FOREIGN_NGRAM, words::native::N as NATIVE_NGRAM}; use ngindex::{item::IndexItem, termset::TermSet, utils::padded, NGIndex, Wordgrams}; use types::jotoba::{language::Language, names::Name}; pub struct Engine; impl engine::Engine<'static> for Engine { type B = NGIndex; type DictItem = String; type Document = IndexItem; type Retriever = DefaultRetrieve<'static, Self::B, Self::DictItem, Self::Document>; // TODO: fix NGramRetriever needing more than `limit` iterations //type Retriever = NGramRetriever<'static, NATIVE_NGRAM, Self::B, Self::DictItem, Self::Document>; type Output = &'static Name; type Query = TermSet; fn make_query>(inp: S, _: Option) -> Option { let fmt = format_word(inp.as_ref()); let dict = Self::get_index(None).dict(); let mut tids: Vec<_> = Wordgrams::new(&padded(&fmt, NATIVE_NGRAM - 1), NATIVE_NGRAM) .filter_map(|i| dict.get_id(i)) .collect(); tids.sort_unstable(); println!("{tids:#?}"); if tids.is_empty() { return None; } Some(TermSet::new(tids)) } #[inline] fn doc_to_output(input: &Self::Document) -> Option> { resources::get() .names() .by_sequence(*input.item()) .map(|i| vec![i]) } #[inline] fn get_index(_: Option) -> &'static Self::B { indexes::get().name().foreign() } #[inline] fn retrieve_for( query: &Self::Query, _q_str: &str, lang: Option, ) -> Retrieve<'static, Self::B, Self::DictItem, Self::Document> { 
Self::retrieve(lang).by_term_ids(query.iter().copied()) } } #[inline] fn format_word(inp: &str) -> String { let mut out = String::from(inp.to_lowercase()); for i in ".,[]() \t\"'\\/-;:".chars() { out = out.replace(i, " "); } out.to_lowercase() } ================================================ FILE: lib/search/src/engine/names/mod.rs ================================================ pub mod foreign; pub mod native; ================================================ FILE: lib/search/src/engine/names/native.rs ================================================ use index_framework::{ retrieve::{retriever::default::DefaultRetrieve, Retrieve}, traits::{backend::Backend, dictionary::IndexDictionary}, }; use indexes::words::native::N as NATIVE_NGRAM; use jp_utils::JapaneseExt; use ngindex::{item::IndexItem, termset::TermSet, utils::padded, NGIndex, Wordgrams}; use types::jotoba::{language::Language, names::Name}; pub struct Engine; impl engine::Engine<'static> for Engine { type B = NGIndex; type DictItem = String; type Document = IndexItem; type Retriever = DefaultRetrieve<'static, Self::B, Self::DictItem, Self::Document>; // TODO: fix NGramRetriever needing more than `limit` iterations //type Retriever = NGramRetriever<'static, NATIVE_NGRAM, Self::B, Self::DictItem, Self::Document>; type Output = &'static Name; type Query = TermSet; fn make_query>(inp: S, _: Option) -> Option { let dict = Self::get_index(None).dict(); let mut tids: Vec<_> = Wordgrams::new(&padded(inp.as_ref(), NATIVE_NGRAM - 1), NATIVE_NGRAM) .filter_map(|i| dict.get_id(i)) .collect(); tids.sort_unstable(); if tids.is_empty() { return None; } Some(TermSet::new(tids)) } #[inline] fn doc_to_output(input: &Self::Document) -> Option> { resources::get() .names() .by_sequence(*input.item()) .map(|i| vec![i]) } #[inline] fn get_index(_: Option) -> &'static Self::B { indexes::get().name().native() } #[inline] fn retrieve_for( query: &Self::Query, _q_str: &str, lang: Option, ) -> Retrieve<'static, Self::B, 
Self::DictItem, Self::Document> { let search_in; if _q_str.is_kanji() { search_in = 2; } else if _q_str.has_kanji() { search_in = 1; } else { search_in = 0; } Self::retrieve(lang) .by_term_ids(query.iter().copied()) .in_posting(search_in) } } ================================================ FILE: lib/search/src/engine/radical/mod.rs ================================================ use types::jotoba::kanji::radical::SearchRadicalInfo; /// Finds Radicals by its meaning(s). If `query_str` was not found /// as meaning of an radical, similar meanings are being searched /// and added to the result pub fn find(query_str: &str) -> Vec<&'static SearchRadicalInfo> { let mut queries = vec![query_str]; let meaning_index = indexes::get().radical().meaning_index(); if !meaning_index.has_term(query_str) { add_similar(query_str, &mut queries); } queries .into_iter() .filter_map(|term| meaning_index.get(term)) .flatten() .take(5) .collect() } /// Adds meanings of radicals with similar meaning as `query_str` to `out` fn add_similar(query_str: &str, out: &mut Vec<&str>) { let meaning_index = indexes::get().radical().meaning_index(); // Search term in meanings let mut found = meaning_index.term_tree.find(&query_str.to_string(), 2); // Show more similar terms above found.sort_by(|a, b| a.1.cmp(&b.1).reverse()); // Assign `queries` to a new vec because it can only contain in index existing terms out.extend(found.into_iter().take(3).map(|i| i.0.as_str())); } ================================================ FILE: lib/search/src/engine/sentences/foreign.rs ================================================ use index_framework::{ backend::memory::{ dict::default::Dictionary, postings::compressed::Postings, storage::default::Storage, MemBackend, }, retrieve::retriever::default::DefaultRetrieve, traits::{backend::Backend, dictionary::IndexDictionary}, }; use sparse_vec::{SpVec32, VecExt}; use types::jotoba::{language::Language, sentences::Sentence}; use vsm::{dict_term::DictTerm, 
doc_vec::DocVector};

/// Search engine for foreign sentence searches
pub struct Engine {}

impl engine::Engine<'static> for Engine {
    // NOTE(review): type parameters were lost in extraction and reconstructed from
    // usage (`doc_to_output` reads a `u32` document id) — confirm against the original.
    type B = MemBackend<
        DictTerm,
        DocVector<u32>,
        Dictionary<DictTerm>,
        Storage<DocVector<u32>>,
        Postings,
    >;
    type DictItem = DictTerm;
    type Document = DocVector<u32>;
    type Retriever = DefaultRetrieve<'static, Self::B, Self::DictItem, Self::Document>;
    type Output = &'static Sentence;
    type Query = SpVec32;

    fn make_query<S: AsRef<str>>(inp: S, _lang: Option<Language>) -> Option<Self::Query> {
        let query = inp.as_ref();

        // Index both the single terms and the full (lowercased) query string
        let mut terms = all_terms(&query.to_lowercase());
        terms.push(query.to_string().to_lowercase());

        let index = Self::get_index(None);
        let term_ids = terms
            .into_iter()
            .filter_map(|i| index.dict().get_id(&i))
            .map(|id| (id, 1.0));

        let vec = SpVec32::create_new_raw(term_ids);
        (!vec.is_empty()).then(|| vec)
    }

    #[inline]
    fn doc_to_output(input: &Self::Document) -> Option<Vec<Self::Output>> {
        resources::get()
            .sentences()
            .by_id(*input.document())
            .map(|i| vec![i])
    }

    #[inline]
    fn get_index(_lang: Option<Language>) -> &'static Self::B {
        indexes::get().sentence().foreign()
    }

    #[inline]
    fn retrieve_for(
        inp: &Self::Query,
        _query_str: &str,
        lang: Option<Language>,
    ) -> index_framework::retrieve::Retrieve<'static, Self::B, Self::DictItem, Self::Document> {
        let term_iter = inp.dimensions().map(|i| i as u32);
        // NOTE(review): `lang.unwrap()` panics when no language is given — callers
        // apparently always provide one for foreign sentence searches; confirm.
        Self::retrieve(lang)
            .by_term_ids(term_iter)
            .in_posting(lang.unwrap() as u32)
    }
}

/// Splits a string into all its terms.
///
/// # Example
/// "make some coffee" => vec!["make","some","coffee"];
pub(crate) fn all_terms(i: &str) -> Vec<String> {
    // Idiom fix: `.map(..).flatten()` -> `.flat_map(..)`
    i.split(' ')
        .flat_map(|w| {
            format_word(w)
                .split(' ')
                .map(|t| t.to_lowercase())
                .filter(|t| !t.is_empty())
                .collect::<Vec<_>>()
        })
        .collect()
}

/// Replaces all special characters into spaces so we can split it down into words
fn format_word(inp: &str) -> String {
    let mut out = String::from(inp);
    for i in ".,[]() \t\"'\\/-;:".chars() {
        out = out.replace(i, " ");
    }
    out
}

================================================
FILE: lib/search/src/engine/sentences/mod.rs
================================================
pub mod foreign;
pub mod native;

================================================
FILE: lib/search/src/engine/sentences/native.rs
================================================
use index_framework::{
    backend::memory::{
        dict::default::Dictionary, postings::compressed::Postings, storage::default::Storage,
        MemBackend,
    },
    retrieve::retriever::default::DefaultRetrieve,
    traits::{backend::Backend, dictionary::IndexDictionary},
};
use jp_utils::JapaneseExt;
use sentence_reader::output::ParseResult;
use sparse_vec::{SpVec32, VecExt};
use std::collections::HashSet;
use types::jotoba::{language::Language, sentences::Sentence};
use vsm::{dict_term::DictTerm, doc_vec::DocVector};

/// Search engine for japanese sentence searches
pub struct Engine {}

impl engine::Engine<'static> for Engine {
    type B = MemBackend<
        DictTerm,
        DocVector<u32>,
        Dictionary<DictTerm>,
        Storage<DocVector<u32>>,
        Postings,
    >;
    type DictItem = DictTerm;
    type Document = DocVector<u32>;
    type Retriever = DefaultRetrieve<'static, Self::B, Self::DictItem, Self::Document>;
    type Output = &'static Sentence;
    type Query = SpVec32;

    fn make_query<S: AsRef<str>>(inp: S, _lang: Option<Language>) -> Option<Self::Query> {
        let mut terms: HashSet<String> = HashSet::new();

        let dict = Self::get_index(None).dict();

        let query = inp.as_ref();
        if dict.get_id(query).is_some() {
            terms.insert(query.to_string());
        } else {
            match sentence_reader::Parser::new(query).parse() {
                ParseResult::Sentence(s) => {
                    terms.extend(s.iter().map(|i|
i.get_inflected()));
                    terms.extend(s.iter().map(|i| i.get_normalized()));
                }
                ParseResult::InflectedWord(w) => {
                    // Prefer the inflected form if it is indexed, otherwise fall
                    // back to the normalized form
                    let infl = w.get_inflected();
                    //println!("inflected: {infl:?}: {:?}", dict.get_id(&infl));
                    if dict.get_id(&infl).is_some() {
                        terms.insert(infl);
                    } else {
                        terms.insert(w.get_normalized());
                    }
                }
                ParseResult::None => (),
            };
        }

        //terms.retain(|w| !index.is_stopword_cust(&w, 10.0).unwrap_or(true));

        let terms = terms.into_iter().map(|i| format_query(&i)).filter_map(|i| {
            let id = dict.get_id(&i);
            //let term = dict.get_term(id).unwrap();
            Some((id?, 1.0))
        });

        let vec = SpVec32::create_new_raw(terms);
        (!vec.is_empty()).then(|| vec)
    }

    #[inline]
    fn doc_to_output(input: &Self::Document) -> Option<Vec<Self::Output>> {
        resources::get()
            .sentences()
            .by_id(*input.document())
            .map(|i| vec![i])
    }

    #[inline]
    fn get_index(_lang: Option<Language>) -> &'static Self::B {
        indexes::get().sentence().native()
    }

    #[inline]
    fn retrieve_for(
        inp: &Self::Query,
        _query_str: &str,
        lang: Option<Language>,
    ) -> index_framework::retrieve::Retrieve<'static, Self::B, Self::DictItem, Self::Document> {
        let term_iter = inp.dimensions().map(|i| i as u32);
        if let Some(lang) = lang {
            // Restrict to the posting list of the given language
            Self::retrieve(Some(lang))
                .by_term_ids(term_iter)
                .in_posting(lang as u32)
        } else {
            // No language given: search the posting lists of all word languages
            let langs = Language::iter_word().map(|i| i as u32);
            Self::retrieve(None)
                .by_term_ids(term_iter)
                .in_postings(langs)
        }
    }
}

/// Normalizes a query term (full-width -> half-width)
#[inline]
fn format_query(inp: &str) -> String {
    inp.to_halfwidth()
}

================================================
FILE: lib/search/src/engine/words/foreign.rs
================================================
use index_framework::{
    backend::memory::{
        dict::default::Dictionary, postings::compressed::Postings, storage::default::Storage,
        MemBackend,
    },
    retrieve::{retriever::default::DefaultRetrieve, Retrieve},
    traits::{backend::Backend, dictionary::IndexDictionary},
};
use once_cell::sync::Lazy;
use regex::Regex;
use sparse_vec::{SpVec32, VecExt};
use types::jotoba::{language::Language, words::Word};
use vsm::{dict_term::DictTerm, doc_vec::DocVector};

/// Search engine for foreign word searches
pub struct Engine;

// Strips a leading "to " from english verb glosses.
// Bug fix: declared as `static` instead of `const` — a `const` containing a `Lazy`
// is re-instantiated on every access, so the regex would be re-compiled on every
// query (clippy: declare_interior_mutable_const).
static FORMAT_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new("^to ").unwrap());

impl engine::Engine<'static> for Engine {
    type B = MemBackend<
        DictTerm,
        DocVector<u32>,
        Dictionary<DictTerm>,
        Storage<DocVector<u32>>,
        Postings,
    >;
    type DictItem = DictTerm;
    type Document = DocVector<u32>;
    type Retriever = DefaultRetrieve<'static, Self::B, Self::DictItem, Self::Document>;
    type Output = &'static Word;
    type Query = SpVec32;

    fn make_query<S: AsRef<str>>(inp: S, lang: Option<Language>) -> Option<Self::Query> {
        let query_str = format_word(inp.as_ref().trim());
        let dict = Self::get_index(lang).dict();

        let inp = FORMAT_REGEX.replace_all(&query_str, "").to_string();

        // Each single term contributes a tiny weight...
        let add_term_iter = inp
            .split(' ')
            .map(|i| i.trim())
            .filter_map(|term| dict.get_id(term))
            .map(|i| (i, 0.001));

        // ...while an exact match of the whole query dominates the score
        let sparse = dict
            .get_id(&inp)
            .map(|i| (i, 1.0))
            .into_iter()
            .chain(add_term_iter);

        let vec = SpVec32::create_new_raw(sparse);
        if vec.is_empty() {
            return None;
        }
        Some(vec)
    }

    #[inline]
    fn doc_to_output(input: &Self::Document) -> Option<Vec<Self::Output>> {
        resources::get()
            .words()
            .by_sequence(*input.document())
            .map(|i| vec![i])
    }

    #[inline]
    fn get_index(lang: Option<Language>) -> &'static Self::B {
        // NOTE(review): panics when `lang` is `None` or no index exists for the
        // language — callers apparently always pass a valid language; confirm.
        indexes::get().word().foreign(lang.unwrap()).unwrap()
    }

    #[inline]
    fn retrieve_for(
        query: &Self::Query,
        _q_str: &str,
        lang: Option<Language>,
    ) -> Retrieve<'static, Self::B, Self::DictItem, Self::Document> {
        let term_iter = query.as_vec().iter().map(|i| i.0);
        Self::retrieve(lang).by_term_ids(term_iter)
    }
}

/// Replaces special characters with spaces and lowercases the input
#[inline]
fn format_word(inp: &str) -> String {
    let mut out = String::from(inp);
    for i in ".,[]() \t\"'\\/-;:・".chars() {
        out = out.replace(i, " ");
    }
    out.to_lowercase()
}

================================================
FILE: lib/search/src/engine/words/mod.rs
================================================
pub mod foreign;
pub mod native;

================================================
FILE: lib/search/src/engine/words/native/k_reading.rs
================================================
use index_framework::retrieve::retriever::default::DefaultRetrieve;
use types::jotoba::language::Language;
use
types::jotoba::words::Word; pub struct Engine; impl engine::Engine<'static> for Engine { type B = indexes::kanji::reading::Index; type DictItem = String; type Document = u32; type Retriever = DefaultRetrieve<'static, Self::B, Self::DictItem, Self::Document>; type Output = &'static Word; type Query = String; fn make_query>(inp: S, _lang: Option) -> Option { Some(inp.as_ref().to_string()) } #[inline] fn doc_to_output(input: &Self::Document) -> Option> { resources::get() .words() .by_sequence(*input) .map(|i| vec![i]) } #[inline] fn get_index(_lang: Option) -> &'static Self::B { indexes::get().word().k_reading() } fn retrieve_for( inp: &Self::Query, _query_str: &str, _lang: Option, ) -> index_framework::retrieve::Retrieve<'static, Self::B, Self::DictItem, Self::Document> { Self::retrieve(None).by_term(inp) } } ================================================ FILE: lib/search/src/engine/words/native/mod.rs ================================================ pub mod k_reading; pub mod regex; use index_framework::{ retrieve::{retriever::default::DefaultRetrieve, Retrieve}, traits::{backend::Backend, dictionary::IndexDictionary}, }; use indexes::words::native::N as NATIVE_NGRAM; use ngindex::{item::IndexItem, termset::TermSet, utils::padded, NGIndex, Wordgrams}; use types::jotoba::{language::Language, words::Word}; pub struct Engine {} impl engine::Engine<'static> for Engine { type B = NGIndex; type DictItem = String; type Document = IndexItem; type Retriever = DefaultRetrieve<'static, Self::B, Self::DictItem, Self::Document>; // TODO: fix NGramRetriever needing more than `limit` iterations //type Retriever = NGramRetriever<'static, NATIVE_NGRAM, Self::B, Self::DictItem, Self::Document>; type Output = &'static Word; type Query = TermSet; fn make_query>(inp: S, _: Option) -> Option { let dict = Self::get_index(None).dict(); let mut tids: Vec<_> = Wordgrams::new(&padded(inp.as_ref(), NATIVE_NGRAM - 1), NATIVE_NGRAM) .filter_map(|i| dict.get_id(i)) .collect(); 
tids.sort_unstable(); if tids.is_empty() { return None; } Some(TermSet::new(tids)) } #[inline] fn doc_to_output(input: &Self::Document) -> Option> { resources::get() .words() .by_sequence(*input.item()) .map(|i| vec![i]) } #[inline] fn get_index(_: Option) -> &'static Self::B { indexes::get().word().native() } #[inline] fn retrieve_for( query: &Self::Query, _q_str: &str, lang: Option, ) -> Retrieve<'static, Self::B, Self::DictItem, Self::Document> { Self::retrieve(lang).by_term_ids(query.iter().copied()) } } ================================================ FILE: lib/search/src/engine/words/native/regex.rs ================================================ use crate::query::regex::RegexSQuery; use engine::utils::page_from_pqueue; use indexes::regex::RegexSearchIndex; use intmap::int_set::IntSet; use itertools::Itertools; use order_struct::order_nh::OrderVal; use priority_container::StableUniquePrioContainerMax; use types::jotoba::words::Word; /// Result of a regex search pub struct RegexSearchResult { pub items: Vec<&'static Word>, // the total amount of items the search would return. 
// This value is most likely different than items.len() pub item_len: usize, } pub fn search(query: &RegexSQuery, sort: F, limit: usize, offset: usize) -> RegexSearchResult where F: Fn(&Word, &str) -> usize, { let word_resources = resources::get().words(); let queue_size = limit + offset; let mut out_queue = StableUniquePrioContainerMax::new_allocated(queue_size, queue_size); let index = indexes::get().word().regex(); let possible_results = find_words(index, &query.get_chars()); for seq_id in possible_results.into_iter().sorted() { let word = word_resources.by_sequence(seq_id).unwrap(); let item_iter = word .reading_iter(true) .filter_map(|i| query.matches(&i.reading).then(|| (word, &i.reading))) .map(|(word, reading)| { let order = sort(word, reading); OrderVal::new(word, order) }); out_queue.extend(item_iter); } let item_len = out_queue.total_pushed(); let items: Vec<_> = page_from_pqueue(limit, offset, out_queue) .into_iter() .map(|i| i.into_inner()) .collect(); RegexSearchResult { items, item_len } } /// Get all indexed words using characters in `chars` pub(crate) fn find_words(index: &RegexSearchIndex, chars: &[char]) -> IntSet { if chars.is_empty() { return IntSet::new(); } let mut out = IntSet::new(); // Add words of first character to `out` let mut chars_iter = chars.iter(); // We want to fill `out` with some values. 
loop { let first = match chars_iter.next() { Some(f) => f, None => break, }; if let Some(v) = index.get_words_with(*first) { out.reserve(v.len()); out.extend(v.iter().copied()); // exit first found character break; } } for v in chars_iter.filter_map(|c| index.get_words_with(*c)) { out.retain(|i| v.contains(&i)); if out.is_empty() { return IntSet::new(); } } out } ================================================ FILE: lib/search/src/executor/mod.rs ================================================ pub mod out_builder; pub mod producer; pub mod search_result; pub mod searchable; use std::time::Instant; use engine::{pushable::FilteredMaxCounter, utils::page_from_pqueue_with_max_dist}; use log::debug; use out_builder::OutputBuilder; use search_result::SearchResult; use searchable::Searchable; use types::jotoba::search::guess::{Guess, GuessType}; use crate::executor::out_builder::OutputAddable; /// Max items to count for estimation pub const MAX_ESTIMATE: usize = 100; /// Executes a search pub struct SearchExecutor { search: S, } impl SearchExecutor { /// Creates a new SearchExecutor #[inline] pub fn new(search: S) -> Self { Self { search } } /// Executes the search pub fn run(self) -> SearchResult { let query = self.search.get_query(); let limit = query.settings.page_size as usize; let offset = query.page_offset; let mut out = OutputBuilder::new(|i| self.search.filter(i), limit + offset); for prod in self.search.get_producer() { if !prod.should_run(out.p.total_pushed()) { continue; } let before = out.p.total_pushed(); let start = Instant::now(); prod.produce(&mut out); let dur = start.elapsed(); let after = out.p.total_pushed(); let name = prod.name(); debug!("{name}: {} Elements in {:?}", after - before, dur); } self.search.mod_output(&mut out); if out.is_empty() && out.output_add.is_empty() { let mut res = SearchResult::default(); res.other_data = out.output_add; return res; } // Get total len of results let len; if let Some(max_top_dist) = self.search.max_top_dist() 
{
            len = out
                .rel_list
                .iter()
                .filter(|i| **i + max_top_dist >= out.max)
                .count();
        } else {
            len = out.p.total_pushed();
        }
        assert_eq!(out.p.total_pushed(), out.rel_list.len());

        let max_top_dist = self.search.max_top_dist().unwrap_or(0.0);
        let items: Vec<_> =
            page_from_pqueue_with_max_dist(limit, offset, max_top_dist, out.max, out.p)
                .into_iter()
                .map(|i| self.search.to_output_item(i.item))
                .collect();

        SearchResult::with_other_data(items, len, out.output_add)
    }

    /// Estimates the amount of results the search would yield, capped at `MAX_ESTIMATE`
    pub fn guess(&self) -> Option<Guess> {
        let start = Instant::now();
        let mut counter =
            FilteredMaxCounter::<S::Item>::new(MAX_ESTIMATE + 1, |i| self.search.filter(i));

        // Keep track of real count to give `should_run` a correct value
        let mut c = 0;

        for prod in self.search.get_producer() {
            if !prod.should_run(c) {
                continue;
            }

            let old_counter = counter.val();
            prod.estimate_to(&mut counter);
            // Add the amount counted just now to `c`
            c += counter.val() - old_counter;

            if counter.is_full() {
                break;
            }
        }

        let sum = counter.val();

        let gt = if sum > MAX_ESTIMATE {
            GuessType::MoreThan
        } else {
            GuessType::Accurate
        };

        debug!("Guessing took: {:?}", start.elapsed());
        Some(Guess::new(sum.min(MAX_ESTIMATE) as u32, gt))
    }
}

================================================
FILE: lib/search/src/executor/out_builder.rs
================================================
use engine::{pushable::Pushable, relevance::item::RelItem};
use priority_container::StableUniquePrioContainerMax;
use std::hash::Hash;

/// Collects producer output in a bounded priority queue while remembering the
/// relevance of everything pushed, so the total result count can be derived later.
// NOTE(review): type parameters were lost in extraction and reconstructed from
// usage — confirm against the original source.
pub struct OutputBuilder<'a, I, OA> {
    pub(crate) p: StableUniquePrioContainerMax<RelItem<I>>,
    pub(crate) filter: Box<dyn Fn(&I) -> bool + 'a>,
    pub(crate) output_add: OA,
    pub(crate) rel_list: Vec<f32>,
    pub(crate) max: f32,
}

impl<'a, I: Eq + Hash + Clone, OA: OutputAddable> OutputBuilder<'a, I, OA> {
    #[inline]
    pub(crate) fn new<F: Fn(&I) -> bool + 'a>(filter: F, len: usize) -> Self {
        Self {
            p: StableUniquePrioContainerMax::new(len),
            filter: Box::new(filter),
            output_add: OA::default(),
            rel_list: vec![],
            max: 0.0,
        }
    }

    #[inline]
    pub fn len(&self) -> usize {
        self.p.len()
    }

    #[inline]
    pub fn is_empty(&self) -> bool {
        self.p.is_empty()
    }

    /// Pushes an element into the output and returns `true` if it was not filtered out
    #[inline]
    pub fn push(&mut self, item: RelItem<I>) -> bool {
        if !(self.filter)(&item.item) {
            if self.max < item.relevance {
                self.max = item.relevance;
            }
            let rel = item.relevance;
            // Only track the relevance of newly inserted items so `rel_list`
            // stays in sync with the queue's total_pushed count
            if self.p.insert(item) {
                self.rel_list.push(rel);
            }
            return true;
        }
        false
    }
}

impl<'a, I: Eq + Hash + Clone, OA: OutputAddable> Pushable for OutputBuilder<'a, I, OA> {
    type Item = RelItem<I>;

    /// Pushes an element into the output and returns `true` if it was not filtered out
    #[inline]
    fn push(&mut self, i: Self::Item) -> bool {
        self.push(i)
    }
}

/// Additional, search-specific output data collected alongside the result items
pub trait OutputAddable: Default {
    #[inline]
    fn is_empty(&self) -> bool {
        false
    }
}

impl OutputAddable for () {}

================================================
FILE: lib/search/src/executor/producer.rs
================================================
use super::{out_builder::OutputBuilder, searchable::Searchable};
use engine::pushable::FilteredMaxCounter;
use std::any::type_name;

/// A single source of search results for a `Searchable`
pub trait Producer {
    type Target: Searchable;

    fn produce(
        &self,
        out: &mut OutputBuilder<
            <Self::Target as Searchable>::Item,
            <Self::Target as Searchable>::ResAdd,
        >,
    );

    fn should_run(&self, _already_found: usize) -> bool {
        true
    }

    fn estimate_to(&self, _out: &mut FilteredMaxCounter<<Self::Target as Searchable>::Item>) {}

    fn name(&self) -> String {
        format_debug_name::<Self>()
    }
}

/// Returns the unqualified type name of `T` (module path stripped)
fn format_debug_name<T: ?Sized>() -> String {
    let mut name = type_name::<T>().to_string();

    // Strip module name
    let start_pos = name
        .char_indices()
        .rev()
        .find(|i| i.1 == ':')
        .map(|i| i.0 + 1)
        .unwrap_or(0);
    name.replace_range(0..start_pos, "");

    name
}

================================================
FILE: lib/search/src/executor/search_result.rs
================================================
use std::ops::Deref;

/// The final result of a search
#[derive(Clone, Debug)]
pub struct SearchResult<T, O = ()> {
    pub items: Vec<T>,
    pub total: usize,
    pub other_data: O,
}

impl<T> SearchResult<T> {
    /// Creates a new SearchResult from a vec
    #[inline]
    pub fn from_vec(items: Vec<T>) -> Self {
        let total = items.len();
        Self {
items, total, other_data: (), } } /// Creates a new search result #[inline] pub fn new(items: Vec, total: usize) -> Self { Self { items, total, other_data: (), } } } impl SearchResult { /// Creates a new search result #[inline] pub fn with_other_data(items: Vec, total: usize, other_data: O) -> Self { Self { items, total, other_data, } } #[inline] pub fn len(&self) -> usize { self.items.len() } #[inline] pub fn is_empty(&self) -> bool { self.len() == 0 } #[inline] pub fn iter(&self) -> impl Iterator { self.items.iter() } } impl SearchResult { /// Creates a new search result pub fn with_other_default(items: Vec, total: usize) -> Self { Self { items, total, other_data: O::default(), } } } impl Default for SearchResult where O: Default, { #[inline] fn default() -> Self { Self { items: vec![], total: 0, other_data: O::default(), } } } impl Deref for SearchResult { type Target = O; #[inline] fn deref(&self) -> &Self::Target { &self.other_data } } ================================================ FILE: lib/search/src/executor/searchable.rs ================================================ use super::{ out_builder::{OutputAddable, OutputBuilder}, producer::Producer, }; use crate::query::Query; use std::{fmt::Debug, hash::Hash}; pub trait Searchable { type Item: Clone + Eq + Hash + Debug; type OutItem; type ResAdd: OutputAddable; fn get_producer<'s>(&'s self) -> &Vec + 's>>; fn get_query(&self) -> &Query; /// Converts from `Self::Item` to `Self::OutputItem` fn to_output_item(&self, item: Self::Item) -> Self::OutItem; /// Allows modifying the collected producers output before converting it to a SearchResult fn mod_output(&self, _out: &mut OutputBuilder) {} /// Should return `true` if the passed item should be ignored / filtered fn filter(&self, _item: &Self::Item) -> bool { false } #[inline] fn max_top_dist(&self) -> Option { None } } ================================================ FILE: lib/search/src/kanji/mod.rs ================================================ mod order; 
pub mod result; mod tag_only; use self::result::KanjiResult; use super::query::Query; use crate::{engine::words::native::Engine, query::QueryLang, word::order::native::NativeOrder}; use engine::task::SearchTask; use error::Error; use jp_utils::JapaneseExt; use result::Item; use types::jotoba::{ kanji::Kanji, search::guess::{Guess, GuessType}, }; /// The entry of a kanji search pub fn search(query: &Query) -> Result { if query.form.is_tag_only() { return tag_only::search(query); } let query_str = format_query(&query.query_str); let res = match query.q_lang { QueryLang::Japanese => by_japanese_query(&query.query_str), QueryLang::Korean => by_korean_reading(&query.query_str), QueryLang::Foreign | QueryLang::Undetected => by_meaning(&query.query_str), }; // TODO: don't use this items in v2 since compound words don't need to be loaded // here anymore let mut items = to_item(res, &query); if !query_str.is_japanese() { items.sort_by(order::default); } let total_len = items.len(); let page_size = query.settings.page_size as usize; let items = items .into_iter() .skip(query.page_offset(page_size)) .take(page_size) .collect::>(); Ok(KanjiResult { items, total_len }) } /// Find a kanji by its literal fn by_japanese_query(query: &str) -> Vec { // Use kanji from query let kanji = kanji_from_str(query); if !kanji.is_empty() || query.is_kanji() { return kanji; } // Do word searc with kana instead kana_search(query) } /// Search for kanji using kana query fn kana_search(query: &str) -> Vec { let q = query.to_string(); let mut search_task = SearchTask::::new(query) .with_limit(10) .with_threshold(0.8) .with_result_filter(move |i| i.has_reading(&q)) .with_custom_order(NativeOrder::new(query.to_string())); search_task .find() .into_iter() .map(|i| kanji_from_str(&i.get_reading().reading)) .flatten() .take(100) .collect() } fn by_korean_reading(query: &str) -> Vec { resources::get() .kanji() .iter() .filter(|k| k.korean_h.iter().any(|kw| kw == query)) .cloned() .collect() } #[inline] 
fn from_char(c: char) -> Option { resources::get().kanji().by_literal(c).cloned() } fn kanji_from_str(text: &str) -> Vec { text.chars() .into_iter() .filter_map(|i| i.is_kanji().then(|| from_char(i)).flatten()) .take(100) .collect() } /// Guesses the amount of results a search would return with given `query` pub fn guess_result(query: &Query) -> Option { let query_str = &query.query_str; let kanji_storage = resources::get().kanji(); let guess = query_str .chars() .into_iter() .filter(|i| i.is_kanji()) .filter_map(|literal| kanji_storage.by_literal(literal)) .take(15) .count(); Some(Guess::new(guess as u32, GuessType::Accurate)) } /// Find kanji by mits meaning fn by_meaning(meaning: &str) -> Vec { // TODO: implement proper algo kek let meaning = meaning.to_lowercase(); resources::get() .kanji() .iter() .filter(|i| i.meanings.contains(&meaning)) .cloned() .collect::>() } #[inline] fn to_item(items: Vec, query: &Query) -> Vec { items .into_iter() .map(|i| Item::load_words(i, query.lang_param())) .collect() } #[inline] fn format_query(query: &str) -> String { query.replace(" ", "").replace(".", "").trim().to_string() } ================================================ FILE: lib/search/src/kanji/order.rs ================================================ use super::result::Item; use std::cmp::Ordering; use utils::option_order; /// Order kanji results which were found by the kanjis meaning appropriately #[inline] pub(crate) fn default(a: &Item, b: &Item) -> Ordering { let a = &a.kanji; let b = &b.kanji; if let Some(o) = option_order(&a.grade, &b.grade) { return o; } if let Some(o) = option_order(&a.frequency, &b.frequency) { return o; } if let Some(o) = option_order(&a.jlpt, &b.jlpt) { return o; } Ordering::Equal } ================================================ FILE: lib/search/src/kanji/result.rs ================================================ use std::{fs::read_to_string, path::Path}; use types::jotoba::{ kanji::Kanji, language::param::AsLangParam, 
words::{filter_languages, Word}, }; // The final result of a Kanji search #[derive(Default)] pub struct KanjiResult { pub items: Vec, pub total_len: usize, } #[derive(Debug, PartialEq, Clone)] pub struct Item { pub kanji: Kanji, pub kun_dicts: Option>, pub on_dicts: Option>, pub has_compositions: bool, } impl Item { pub fn load_words(k: Kanji, lang: impl AsLangParam) -> Self { let kun_dicts = load_dicts(&k.kun_dicts, lang); let on_dicts = load_dicts(&k.on_dicts, lang); let has_compositions = resources::get().kanji().ids(k.literal).is_some(); Self { kun_dicts, on_dicts, kanji: k, has_compositions, } } } fn load_dicts(dicts: &Vec, lang: impl AsLangParam) -> Option> { let word_storage = resources::get().words(); let mut words: Vec<_> = dicts .iter() .filter_map(|j| word_storage.by_sequence(*j)) .cloned() .collect(); filter_languages(words.iter_mut(), lang); if words.is_empty() { return None; } Some(words) } impl Item { /// Returns the entries' frames (svg) pub fn get_frames>(&self, assets_path: P) -> Option { read_to_string(self.kanji.get_stroke_frames_path(assets_path)).ok() } /// Return the animation entries for the template pub fn get_animation>(&self, assets_path: P) -> Option { read_to_string(self.kanji.get_animation_path(assets_path)).ok() } /// Get a list of korean readings, formatted as: " ()" pub fn get_korean(&self) -> Option> { if !self.kanji.korean_r.is_empty() && !self.kanji.korean_h.is_empty() { let korean_h = &self.kanji.korean_h; let korean_r = &self.kanji.korean_r; Some( korean_h .iter() .zip(korean_r.iter()) .map(|(h, k)| format!("{} ({})", h, k)) .collect(), ) } else { None } } /// Returns the amount of parts a kanji is bulit with #[inline] pub fn get_parts_count(&self) -> usize { self.kanji.parts.len() } #[inline] pub fn get_radical(&self) -> String { if let Some(ref alternative) = self.kanji.radical.alternative { format!("{} ({})", self.kanji.radical.literal, alternative) } else { self.kanji.radical.literal.clone().to_string() } } #[inline] pub fn 
get_rad_len(&self) -> usize { self.kanji .radical .alternative .as_ref() .map(|_| 1) .unwrap_or_default() + self .kanji .radical .translations .as_ref() .map(|i| i.join(", ").len()) .unwrap_or_default() } } ================================================ FILE: lib/search/src/kanji/tag_only.rs ================================================ use super::KanjiResult; use crate::query::{tags::Tag, Query}; use error::Error; pub fn search(query: &Query) -> Result { let single_tag = query.tags.iter().find(|i| i.is_producer()); if single_tag.is_none() { return Ok(KanjiResult::default()); } match single_tag.unwrap() { Tag::Jlpt(jlpt) => jlpt_search(query, *jlpt), Tag::GenkiLesson(genki_lesson) => genki_search(query, *genki_lesson), _ => return Ok(KanjiResult::default()), } } fn genki_search(query: &Query, genki_lesson: u8) -> Result { let kanji_retrieve = resources::get().kanji(); let genki_lesson = match kanji_retrieve.by_genki_lesson(genki_lesson) { Some(gl) => gl, None => return Ok(KanjiResult::default()), }; let kanji = genki_lesson .iter() .filter_map(|literal| kanji_retrieve.by_literal(*literal)) .cloned() .collect::>(); let total_len = kanji.len(); let page_size = query.settings.page_size as usize; let page_offset = query.page_offset(page_size); let kanji = kanji .into_iter() .skip(page_offset) .take(page_size) .collect::>(); let items = super::to_item(kanji, query); Ok(KanjiResult { items, total_len }) } fn jlpt_search(query: &Query, jlpt: u8) -> Result { let kanji_retrieve = resources::get().kanji(); let jlpt_kanji = match kanji_retrieve.by_jlpt(jlpt) { Some(jlpt) => jlpt, None => return Ok(KanjiResult::default()), }; let total_len = jlpt_kanji.len(); let page_size = query.settings.page_size as usize; let page_offset = query.page_offset(page_size); let jlpt_kanji = jlpt_kanji .into_iter() .skip(page_offset) .take(page_size) .filter_map(|literal| kanji_retrieve.by_literal(*literal)) .cloned() .collect::>(); Ok(KanjiResult { items: super::to_item(jlpt_kanji, query), 
total_len, }) } ================================================ FILE: lib/search/src/lib.rs ================================================ pub mod engine; pub mod executor; pub mod kanji; pub mod name; pub mod query; pub mod radical; pub mod sentence; pub mod word; pub use executor::SearchExecutor; use query::Query; use types::jotoba::search::{help::SearchHelp, SearchTarget}; /// Build a [`SearchHelp`] in for cases without any search results pub fn build_help(querytype: SearchTarget, query: &Query) -> Option { let mut help = SearchHelp::default(); for qt in SearchTarget::iterate().filter(|i| *i != querytype) { match qt { SearchTarget::Kanji => help.kanji = kanji::guess_result(query), SearchTarget::Sentences => { help.sentences = SearchExecutor::new(sentence::Search::new(query)).guess() } SearchTarget::Names => { help.names = SearchExecutor::new(name::Search::new(query)).guess() } SearchTarget::Words => { help.words = SearchExecutor::new(word::Search::new(query)).guess() } } } if querytype == SearchTarget::Words { //help.other_langs = word::guess_inp_language(query); } (!help.is_empty()).then(|| help) } ================================================ FILE: lib/search/src/name/mod.rs ================================================ mod order; mod producer; use crate::{ executor::{producer::Producer, searchable::Searchable}, query::Query, }; use producer::{ foreign::ForeignProducer, kanji_reading::KreadingProducer, native::NativeProducer, sequence::SeqProducer, }; use types::jotoba::names::Name; use self::producer::native::split::SplitProducer; pub struct Search<'a> { query: &'a Query, producer: Vec + 'a>>, } impl<'a> Search<'a> { pub fn new(query: &'a Query) -> Self { let mut producer: Vec>> = vec![]; producer.push(Box::new(SeqProducer::new(query))); producer.push(Box::new(KreadingProducer::new(query))); producer.push(Box::new(ForeignProducer::new(query))); producer.push(Box::new(NativeProducer::new(query))); producer.push(Box::new(SplitProducer::new(query))); 
Self { query, producer } } } impl<'a> Searchable for Search<'a> { type Item = &'static Name; type OutItem = &'static Name; type ResAdd = (); #[inline] fn to_output_item(&self, item: Self::Item) -> Self::OutItem { item } #[inline] fn get_producer<'s>(&'s self) -> &Vec + 's>> { &self.producer } #[inline] fn get_query(&self) -> &Query { self.query } } ================================================ FILE: lib/search/src/name/order/foreign.rs ================================================ use engine::relevance::{data::SortData, RelevanceEngine}; use ngindex::{item::IndexItem, termset::TermSet}; use types::jotoba::names::Name; pub struct ForeignOrder; impl RelevanceEngine for ForeignOrder { type OutItem = &'static Name; type IndexItem = IndexItem; type Query = TermSet; #[inline] fn score<'item, 'query>( &self, item: &SortData<'item, 'query, Self::OutItem, Self::IndexItem, Self::Query>, ) -> f32 { item.index_item().dice_weighted(item.query(), 0.1) } } ================================================ FILE: lib/search/src/name/order/japanese.rs ================================================ use engine::relevance::{data::SortData, RelevanceEngine}; use ngindex::{item::IndexItem, termset::TermSet}; use types::jotoba::names::Name; pub struct NativeOrder; impl RelevanceEngine for NativeOrder { type OutItem = &'static Name; type IndexItem = IndexItem; type Query = TermSet; #[inline] fn score<'item, 'query>( &self, item: &SortData<'item, 'query, Self::OutItem, Self::IndexItem, Self::Query>, ) -> f32 { item.index_item().dice_weighted(item.query(), 0.1) } } ================================================ FILE: lib/search/src/name/order/mod.rs ================================================ pub mod foreign; pub mod japanese; ================================================ FILE: lib/search/src/name/producer/foreign.rs ================================================ use engine::{pushable::FilteredMaxCounter, task::SearchTask}; use crate::{ engine::names::foreign::Engine, 
executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable}, name::{order::foreign::ForeignOrder, Search}, query::{Query, QueryLang}, }; pub struct ForeignProducer<'a> { query: &'a Query, } impl<'a> ForeignProducer<'a> { pub fn new(query: &'a Query) -> Self { Self { query } } fn foreign_task(&self) -> SearchTask<'static, Engine> { let query = format_word(&self.query.query_str); SearchTask::::new(&query) .with_custom_order(ForeignOrder) .with_threshold(0.5) } } impl<'a> Producer for ForeignProducer<'a> { type Target = Search<'a>; fn produce( &self, out: &mut OutputBuilder< ::Item, ::ResAdd, >, ) { self.foreign_task().find_to(out); } fn should_run(&self, _already_found: usize) -> bool { self.query.q_lang != QueryLang::Japanese } fn estimate_to(&self, out: &mut FilteredMaxCounter<::Item>) { self.foreign_task().estimate_to(out); } } #[inline] fn format_word(inp: &str) -> String { let mut out = String::from(inp.to_lowercase()); for i in ".,[]() \t\"'\\/-;:".chars() { out = out.replace(i, " "); } out.to_lowercase() } ================================================ FILE: lib/search/src/name/producer/kanji_reading.rs ================================================ use crate::{ engine::names::native::Engine, executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable}, name::Search, query::Query, }; use engine::{pushable::FilteredMaxCounter, task::SearchTask}; use japanese::furigana::generate::{assign_readings, ReadingRetrieve}; use resources::retrieve::kanji::KanjiRetrieve; use types::jotoba::names::Name; pub struct KreadingProducer<'a> { query: &'a Query, } impl<'a> KreadingProducer<'a> { pub fn new(query: &'a Query) -> Self { Self { query } } fn search_task(&self) -> Option> { let k_reading = self.query.form.as_kanji_reading()?; let literal = k_reading.literal; let reading = k_reading.reading.clone(); let task = SearchTask::::new(k_reading.literal.to_string()) .with_result_filter(move |name| filter(name, &reading, 
literal).unwrap_or(false)); Some(task) } } impl<'a> Producer for KreadingProducer<'a> { type Target = Search<'a>; fn produce( &self, out: &mut OutputBuilder< ::Item, ::ResAdd, >, ) { if let Some(mut task) = self.search_task() { task.find_to(out); } } fn should_run(&self, _already_found: usize) -> bool { self.query.form.is_kanji_reading() } fn estimate_to(&self, out: &mut FilteredMaxCounter<::Item>) { if let Some(mut task) = self.search_task() { task.estimate_to(out); } } } /// Search result filter function fn filter(name: &Name, reading: &str, literal: char) -> Option { let kanji = name.kanji.as_ref()?; let retrieve = NanoriRetrieve::new(resources::get().kanji()); let readings = assign_readings(retrieve, kanji, &name.kana)?; Some( readings .iter() .any(|i| i.0.contains(&literal.to_string()) && i.1.contains(&reading)), ) } /// Custom `ReadingRetrieve` implementing struct to include // nanori readings in reading retrieve function struct NanoriRetrieve<'a> { kanji_retrieve: KanjiRetrieve<'a>, } impl<'a> NanoriRetrieve<'a> { fn new(kanji_retrieve: KanjiRetrieve<'a>) -> Self { Self { kanji_retrieve } } } impl<'a> ReadingRetrieve for NanoriRetrieve<'a> { #[inline] fn onyomi(&self, lit: char) -> Vec { self.kanji_retrieve.onyomi(lit) } #[inline] fn kunyomi(&self, lit: char) -> Vec { self.kanji_retrieve.kunyomi(lit) } fn all(&self, lit: char) -> Vec { let res = resources::get().kanji(); let k = match res.by_literal(lit) { Some(k) => k, None => return vec![], }; k.kunyomi .clone() .into_iter() .chain(k.onyomi.clone()) .chain(k.nanori.clone()) .collect() } } ================================================ FILE: lib/search/src/name/producer/mod.rs ================================================ pub mod foreign; pub mod kanji_reading; pub mod native; pub mod sequence; ================================================ FILE: lib/search/src/name/producer/native/mod.rs ================================================ pub mod split; use crate::{ engine::names::native::Engine, 
executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable}, name::{order::japanese::NativeOrder, Search}, query::{Query, QueryLang}, }; use engine::{pushable::FilteredMaxCounter, task::SearchTask}; pub struct NativeProducer<'a> { query: &'a Query, } impl<'a> NativeProducer<'a> { #[inline] pub fn new(query: &'a Query) -> Self { Self { query } } #[inline] fn jp_task(&self) -> SearchTask<'static, Engine> { SearchTask::::new(&self.query.query_str) .with_custom_order(NativeOrder) .with_threshold(0.3) } } impl<'a> Producer for NativeProducer<'a> { type Target = Search<'a>; fn produce( &self, out: &mut OutputBuilder< ::Item, ::ResAdd, >, ) { self.jp_task().find_to(out); } fn should_run(&self, _already_found: usize) -> bool { self.query.q_lang == QueryLang::Japanese } fn estimate_to(&self, out: &mut FilteredMaxCounter<::Item>) { self.jp_task().estimate_to(out); } } ================================================ FILE: lib/search/src/name/producer/native/split.rs ================================================ use engine::{ pushable::{FilteredMaxCounter, Pushable}, relevance::{data::SortData, item::RelItem, RelevanceEngine}, task::SearchTask, }; use ngindex::{item::IndexItem, termset::TermSet}; use sentence_reader::{output::ParseResult, Parser}; use types::jotoba::names::Name; use crate::{ engine::names::native::Engine, executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable}, name::{order::japanese::NativeOrder, Search}, query::Query, }; pub struct SplitProducer<'a> { query: &'a Query, } impl<'a> SplitProducer<'a> { pub fn new(query: &'a Query) -> Self { Self { query } } fn queries(&self) -> Vec { let splitted = Parser::new(&self.query.query_str); match splitted.parse() { ParseResult::Sentence(s) => s.iter().map(|p| p.get_normalized()).collect(), ParseResult::InflectedWord(w) => vec![w.get_normalized()], ParseResult::None => vec![], } } fn run(&self, cb: C, out: &mut P) where C: Fn(&mut SearchTask<'static, Engine>, &mut 
P), P: Pushable, { let queries = self.queries(); let query_count = queries.len(); for (pos, query) in queries.into_iter().enumerate() { let mut task = SearchTask::::new(&query) .with_limit(1) .with_custom_order(SplitOrder::new(query_count, pos)); (cb)(&mut task, out); } } fn find_to

    (&self, out: &mut P) where P: Pushable>, { self.run( |engine, out| { engine.find_to(out); }, out, ); } } impl<'a> Producer for SplitProducer<'a> { type Target = Search<'a>; fn produce( &self, out: &mut OutputBuilder< ::Item, ::ResAdd, >, ) { self.find_to(out) } fn should_run(&self, already_found: usize) -> bool { already_found < 10 //already_found == 0 } fn estimate_to(&self, out: &mut FilteredMaxCounter<::Item>) { self.run(|engine, out| engine.estimate_to(out), out); } } struct SplitOrder { q_count: usize, pos: usize, } impl SplitOrder { #[inline] fn new(q_count: usize, pos: usize) -> Self { Self { q_count, pos } } } impl RelevanceEngine for SplitOrder { type OutItem = &'static Name; type IndexItem = IndexItem; type Query = TermSet; #[inline] fn score<'item, 'query>( &self, item: &SortData<'item, 'query, Self::OutItem, Self::IndexItem, Self::Query>, ) -> f32 { let sim = NativeOrder.score(item); let rel = (self.q_count - self.pos) as f32; sim * rel * 0.001 } } ================================================ FILE: lib/search/src/name/producer/sequence.rs ================================================ use crate::{ executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable}, name::Search, query::Query, }; use engine::{ pushable::{FilteredMaxCounter, Pushable}, relevance::item::RelItem, }; use types::jotoba::names::Name; pub struct SeqProducer<'a> { query: &'a Query, } impl<'a> SeqProducer<'a> { pub fn new(query: &'a Query) -> Self { Self { query } } fn name(&self) -> Option<&'static Name> { let seq = *self.query.form.as_sequence()?; resources::get().names().by_sequence(seq) } } impl<'a> Producer for SeqProducer<'a> { type Target = Search<'a>; fn produce( &self, out: &mut OutputBuilder< ::Item, ::ResAdd, >, ) { if let Some(name) = self.name() { out.push(RelItem::new(name, 0.0)); } } fn estimate_to(&self, out: &mut FilteredMaxCounter<::Item>) { if let Some(name) = self.name() { out.push(name); } } fn should_run(&self, _already_found: usize) 
-> bool { self.query.form.is_sequence() } } ================================================ FILE: lib/search/src/query/form.rs ================================================ use types::jotoba::kanji; /// The form the query was provided in #[derive(Debug, Clone, PartialEq, Hash, Default)] pub enum Form { /// A single word was provided SingleWord, /// Multiple words were provided MultiWords, /// Kanji reading based search eg. '気 ケ' KanjiReading(kanji::reading::ReadingSearch), /// Tag only. Implies query string to be empty TagOnly, /// Sequence Search Sequence(u32), /// Form was not recognized #[default] Undetected, } impl Form { #[inline] pub fn as_kanji_reading(&self) -> Option<&kanji::reading::ReadingSearch> { if let Self::KanjiReading(v) = self { Some(v) } else { None } } /// Returns `true` if the form is [`KanjiReading`]. #[inline] pub fn is_kanji_reading(&self) -> bool { matches!(self, Self::KanjiReading(..)) } /// Returns `true` if the form is [`TagOnly`]. /// /// [`TagOnly`]: Form::TagOnly #[inline] pub fn is_tag_only(&self) -> bool { matches!(self, Self::TagOnly) } /// Returns `true` if the form is no special type of search #[inline] pub fn is_normal(&self) -> bool { match self { Form::SingleWord | Form::MultiWords | Form::Undetected => true, _ => false, } } /// Returns `true` if the form is [`Sequence`]. 
/// /// [`Sequence`]: Form::Sequence #[inline] pub fn is_sequence(&self) -> bool { matches!(self, Self::Sequence(..)) } pub fn as_sequence(&self) -> Option<&u32> { if let Self::Sequence(v) = self { Some(v) } else { None } } } ================================================ FILE: lib/search/src/query/mod.rs ================================================ pub mod form; pub mod parser; pub mod prefix; pub mod regex; pub mod tags; pub mod user_settings; pub use form::Form; pub use tags::Tag; pub use user_settings::UserSettings; use self::regex::RegexSQuery; use percent_encoding::{utf8_percent_encode, AsciiSet, NON_ALPHANUMERIC}; use std::hash::Hash; use types::jotoba::{ language::{LangParam, Language}, search::SearchTarget, words::{misc::Misc, part_of_speech::PosSimple}, }; const QUERY_ENCODE_SET: &AsciiSet = &NON_ALPHANUMERIC.add(b'/'); /// A parsed query for a search request #[derive(Debug, Clone, PartialEq, Default, Hash)] pub struct Query { /// The raw query string from the user without any modifications pub raw_query: String, /// Parsed query string which will be used to find results pub query_str: String, /// Where to search {Words,Names,Kanji,Sentences} pub target: SearchTarget, /// Additional tags eg. #kanji or #jlpt4 pub tags: Vec, /// The form of the Query. Eg. 
KanjiReadingSearch or TagOnly pub form: Form, /// The language of the passed query string pub q_lang: QueryLang, /// User settings pub settings: UserSettings, /// Item offset based on the (current) page pub page_offset: usize, /// Current page pub page: usize, /// Word index within a sentence reader search pub word_index: usize, /// All terms the result has to contain to be shown pub must_contain: Vec, /// Overwrite the users settings language temporarily pub cust_lang: Option, /// Regex query (for jp) pub regex: Option, } /// The language of the query content itself #[derive(Debug, Default, Clone, Copy, PartialEq, Hash)] pub enum QueryLang { Japanese, Foreign, Korean, #[default] Undetected, } impl Query { /// Returns true if the query has at least one pos tag #[inline] pub fn has_part_of_speech_tags(&self) -> bool { self.get_part_of_speech_tags().next().is_some() } /// Returns an iterator over all search type tags #[inline] pub fn get_search_type_tags(&self) -> impl Iterator + '_ { self.tags.iter().filter_map(|i| i.as_search_type()) } /// Returns an iterator over all PosSimple tags #[inline] pub fn get_part_of_speech_tags(&self) -> impl Iterator + '_ { self.tags.iter().filter_map(|i| i.as_part_of_speech()) } /// Returns an iterator over all Misc tags #[inline] pub fn get_misc_tags(&self) -> impl Iterator + '_ { self.tags.iter().filter_map(|i| i.as_misc()) } /// Returns the result offset by a given page #[inline] pub fn page_offset(&self, page_size: usize) -> usize { parser::calc_page_offset(self.page, page_size) } /// Returns `true` if query has `tag` #[inline] pub fn has_tag(&self, tag: Tag) -> bool { self.tags.iter().any(|i| *i == tag) } /// Adds `n` pages to the query pub fn add_page(&mut self, n: usize) { self.page = (self.page + n).min(100); self.page_offset += (self.settings.page_size as usize) * n; } /// Returns the original_query with search type tags omitted #[inline] pub fn without_search_type_tags(&self) -> String { let (new_query, _) = 
/// Tries to determine whether `query` is Japanese, Korean or non-Japanese ("foreign") text.
///
/// Regex syntax characters are stripped first (via `strip_regex`) so wildcard
/// queries are classified by their literal characters only. Hangul input is
/// short-circuited to `Korean`. Otherwise the Japanese character ratio is
/// compared against `JAPANESE_THRESHOLD`: strictly above → `Japanese`,
/// strictly below → `Foreign`, exactly equal → `Undetected`.
pub fn parse(query: &str) -> QueryLang {
    // Classify the regex-free form; fall back to the raw query when it
    // contains no regex syntax.
    let query = strip_regex(query).unwrap_or_else(|| query.to_string());

    if utils::korean::is_hangul_str(&query) {
        return QueryLang::Korean;
    }

    match get_jp_part(&query).cmp(&JAPANESE_THRESHOLD) {
        Ordering::Equal => QueryLang::Undetected,
        Ordering::Less => QueryLang::Foreign,
        Ordering::Greater => QueryLang::Japanese,
    }
}
0; let mut japanese = 0; for c in inp.chars() { total += 1; if c.is_japanese() { japanese += 1; } } ((japanese as f32 / total as f32) * 100f32) as usize } /// Removes regex parts from a query. Returns `None` if `query` does not contain regex symbols fn strip_regex(query: &str) -> Option { Some(RegexSQuery::new(query)?.get_chars().into_iter().collect()) } ================================================ FILE: lib/search/src/query/parser/mod.rs ================================================ pub mod lang; pub(crate) mod prefix; pub mod req_terms; pub(crate) mod tags; use super::{prefix::SearchPrefix, regex::RegexSQuery, Form, Query, Tag, UserSettings}; use jp_utils::JapaneseExt; use types::jotoba::{kanji, language::Language as ContentLanguage, search::SearchTarget}; /// Max amount of characters a query is allowed to have pub const MAX_QUERY_LEN: usize = 400; /// Amount of characters (in percent) that have to be Japanese characters /// in order to handle the input as Japanese text pub const JAPANESE_THRESHOLD: usize = 40; /// Represents a query pub struct QueryParser { /// Where to search {Words,Names,Kanji,Sentences} q_type: SearchTarget, /// The unmodified query from the search-input raw_query: String, /// Users settings user_settings: UserSettings, /// Item offset based on the picked page page_offset: usize, /// Current page page: usize, /// Word index for the sentence reader word_index: usize, /// Overwrite the users settings language language_override: Option, } impl QueryParser { /// Create a new QueryParser pub fn new( raw_query: String, q_type: SearchTarget, user_settings: UserSettings, ) -> QueryParser { QueryParser { raw_query, q_type, user_settings, page_offset: 0, page: 0, word_index: 0, language_override: None, } } #[inline] pub fn with_lang_overwrite(mut self, lang: ContentLanguage) -> Self { self.language_override = Some(lang); self } #[inline] pub fn with_word_index(mut self, word_index: usize) -> Self { self.word_index = word_index; self } #[inline] 
pub fn with_page(mut self, page: usize) -> Self { self.page = page; self.page_offset = calc_page_offset(page, self.user_settings.page_size as usize); self } /// Parses a user query into Query pub fn parse(mut self) -> Option { let (stripped, s_prefix) = prefix::parse_prefix(&self.raw_query); if let Some(SearchPrefix::LangOverwrite(r#lang_overwrite)) = s_prefix { self.language_override = Some(lang_overwrite); } let (new_query, tags) = Self::extract_tags(&stripped); let (new_query, must_contain) = req_terms::parse(&new_query); let query_str: String = new_query .trim() .chars() .into_iter() .take(MAX_QUERY_LEN) .collect(); // Don't allow empty queries if query_str.is_empty() && !tags.iter().any(|i| i.is_producer()) { return None; } let q_lang = lang::parse(&query_str); let target = self.get_search_target(&tags); let form = self.parse_form(&query_str, &tags, s_prefix); let regex = RegexSQuery::new(&query_str); Some(Query { q_lang, target, form, tags, query_str, raw_query: self.raw_query, settings: self.user_settings, page_offset: self.page_offset, page: self.page, word_index: self.word_index, cust_lang: self.language_override, must_contain, regex, }) } // Extracts all tags from `query_str` and returns a new String along with the extracted tags #[inline] fn extract_tags(query_str: &str) -> (String, Vec) { tags::extract_parse(query_str, |t_s| { let s = t_s.to_lowercase(); (tags::parse(&s), true) }) } /// Parses the QueryType based on the user selection and tags #[inline] fn get_search_target(&self, tags: &[Tag]) -> SearchTarget { tags.iter() .filter_map(|i| i.as_search_type()) .copied() .next() .unwrap_or(self.q_type) } fn parse_form(&self, query: &str, tags: &[Tag], s_prefix: Option) -> Form { // Sequence search if let Some(SearchPrefix::BySequence(r#seq)) = s_prefix { return Form::Sequence(seq); } // Tag only search if query.is_empty() && tags.iter().any(|i| i.is_producer()) { return Form::TagOnly; } // Detect a kanji reading query if let Some(kr) = 
self.parse_kanji_reading(query) { return Form::KanjiReading(kr); } // Japanese only input if query.is_japanese() { return Form::SingleWord; } // Non Japanese input if !query.has_japanese() { // Assuming every other supported language is // not as retarded as Japanese and actually uses spaces INSTEAD OF FUCKING 2000 CHARACTERS FFS return if query.contains(' ') { Form::MultiWords } else { Form::SingleWord }; } Form::Undetected } /// Returns Some(KanjiReading) if the query is a kanji reading query fn parse_kanji_reading(&self, query: &str) -> Option { // Format of kanji query: ' ' if utils::real_string_len(query) < 3 || !query.contains(' ') { return None; } let split: Vec<_> = query.split(' ').collect(); let kanji_lit = split[0].trim(); if kanji_lit.is_kanji() && format_kanji_reading(split[1]).is_japanese() // don't allow queries like '音楽 おと' && utils::real_string_len(kanji_lit) == 1 { // Kanji detected return Some(kanji::reading::ReadingSearch { literal: split[0].chars().next().unwrap(), reading: split[1].to_string(), }); } None } } #[inline] pub fn format_kanji_reading(s: &str) -> String { s.replace('.', "").replace('-', "").replace(' ', "") } pub fn calc_page_offset(page: usize, page_size: usize) -> usize { page.saturating_sub(1) * page_size } ================================================ FILE: lib/search/src/query/parser/prefix.rs ================================================ use crate::query::prefix::SearchPrefix; use std::str::FromStr; use types::jotoba::language::Language; /// Strinps and parses a `SearchPrefix` from a `query` pub fn parse_prefix(query: &str) -> (&str, Option) { if let (new_query, Some(lang)) = try_lang_prefix(query) { return (new_query, Some(SearchPrefix::LangOverwrite(lang))); } if let Some(seq_id) = try_sequence(query) { return (query, Some(SearchPrefix::BySequence(seq_id))); } (query, None) } fn try_lang_prefix(query: &str) -> (&str, Option) { let split_pos = query.find(':'); if split_pos.is_none() || *split_pos.as_ref().unwrap() > 3 
/// Parses a sequence-id query of the form `seq:<id>`.
///
/// Returns `None` when the query does not start with `seq:` or when the
/// remainder (after trimming whitespace) is not a valid `u32`.
#[inline]
fn try_sequence(query: &str) -> Option<u32> {
    let rest = query.strip_prefix("seq:")?;
    rest.trim().parse::<u32>().ok()
}
/// Extracts all tags from the query and returns a new one without tags along with those tags which were extracted.
///
/// `parse` is invoked for every raw tag match (e.g. `"#kanji"`) and returns
/// the tags parsed out of it together with a flag telling whether that tag
/// string should be removed from the query text.
pub fn extract_parse<'a, F>(inp: &'a str, parse: F) -> (String, Vec)
where
    F: Fn(&str) -> (Vec, bool),
{
    let mut new_out = inp.to_string();
    let mut tags = vec![];

    // We edit the string so we have to keep track of how many bytes
    // we already removed in order to remove the correct range from the string
    let mut delta = 0;

    for m in TAG_REGEX.find_iter(inp) {
        let tag_str = m.as_str();

        let (parsed_tags, remove) = parse(tag_str);
        if !parsed_tags.is_empty() {
            tags.extend(parsed_tags);
        }

        // Tags that should stay in the query (remove == false) are only collected
        if !remove {
            continue;
        }

        // Remove tag-str from query: translate the match range from `inp`
        // coordinates into `new_out` coordinates using `delta`
        let r = m.range();
        let s = r.start - delta;
        let mut e = r.end - delta;

        // Strip space from tag too
        // NOTE(review): `e` has already been shifted by `delta` into `new_out`
        // coordinates, yet it is used below to index into `inp` (unshifted).
        // For the first removed tag (delta == 0) both coordinate systems agree,
        // but for subsequent tags this may inspect the wrong byte of `inp` —
        // verify whether `inp[r.end..r.end + 1]` was intended here.
        if new_out.len() > e + 1 && inp.is_char_boundary(e + 1) && &inp[e..e + 1] == " " {
            e += 1;
            delta += 1;
        }

        new_out.replace_range(s..e, "");
        delta += r.len();
    }

    (trim_string_end(new_out), tags)
}
Some(tag) = parse_jlpt_tag(s) { tags.push(tag); } if let Some(tag) = parse_search_type(s) { tags.push(tag); } if let Some(pos) = PosSimple::from_str(&s[1..]).ok() { tags.push(Tag::PartOfSpeech(pos)); } if let Some(misc) = Misc::from_str(&s[1..]).ok() { tags.push(Tag::Misc(misc)); } if let Some(sentence_tag) = sentences::Tag::from_str(&s[1..]).ok() { tags.push(Tag::SentenceTag(sentence_tag)); } tags } /// Returns `Some(u8)` if `s` is a valid N/jlpt-tag fn parse_jlpt_tag(s: &str) -> Option { let jlpt = s .strip_prefix("#n") .or_else(|| s.strip_prefix("#jlpt"))? .parse::() .ok()? .min(5) .max(1); Some(Tag::Jlpt(jlpt)) } /// Returns `Some(u8)` if `s` is a valid genki-tag fn parse_genki_tag(s: &str) -> Option { let genki = s.strip_prefix("#genki")?.parse::().ok()?.max(3).min(23); Some(Tag::GenkiLesson(genki)) } /// Parse only search type fn parse_search_type(s: &str) -> Option { Some(match s[1..].to_lowercase().as_str() { "kanji" => Tag::SearchType(SearchTarget::Kanji), "sentence" | "sentences" => Tag::SearchType(SearchTarget::Sentences), "name" | "names" => Tag::SearchType(SearchTarget::Names), "word" | "words" => Tag::SearchType(SearchTarget::Words), "abbreviation" | "abbrev" => Tag::Misc(Misc::Abbreviation), "uwk" => Tag::Misc(Misc::UsuallyWrittenInKana), _ => return None, }) } #[cfg(test)] mod test { use super::*; #[test] fn test_parse_jlpt_tag_parsing() { assert_eq!(parse_jlpt_tag("#n4"), Some(Tag::Jlpt(4))); } #[test] fn test_parse_genki_tag_parsing() { assert_eq!(parse_genki_tag("#genki3"), Some(Tag::GenkiLesson(3))); assert_eq!(parse_genki_tag("#genki23"), Some(Tag::GenkiLesson(23))); } } ================================================ FILE: lib/search/src/query/prefix.rs ================================================ use types::jotoba::language::Language; /// Prefix of a search query. eg 'seq: 1234' #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum SearchPrefix { /// A custom language prefix. 
Eg: 'rus: Россия' LangOverwrite(Language), /// Search by sequence-id within jmdict BySequence(u32), } ================================================ FILE: lib/search/src/query/regex.rs ================================================ //! A regex like search query to search in words with wildcards //! //! # Example //! //! "宇宙*行士" => "宇宙飛行士" //! //! # Supported syntax //! `*` - Allows 0-n other characters //! `?` - Allows 1 other characters //! //! # Note //! All queries containing (custom)regex syntax will be handled as full-word matches. In other words if //! a query contains regex syntax, only full words will be matched. If words should also be open to //! an end (eg. right variable) then a regex charecter has to be placed at the end as well use std::hash::Hash; use regex::Regex; /// All characters treated as regex characters pub const REGEX_CHARS: &[char] = &['*', '?', '?']; /// Regex Search query. Can be used to match words #[derive(Clone, Debug)] pub struct RegexSQuery { query: String, regex: Regex, } impl RegexSQuery { /// Create a new regex query. 
Returns `None` if invalid or no regex given pub fn new(query: &str) -> Option { let query = adjust_regex(query); if !Self::is_regex(&query) { return None; } let regex = Regex::new(&Self::convert_regex(&query)).ok()?; Some(RegexSQuery { query, regex }) } /// Returns `true` if a word matches the regex query #[inline] pub fn matches(&self, word: &str) -> bool { self.regex.is_match(word) } /// Returns all characters that don't represent regex syntax pub fn get_chars(&self) -> Vec { let mut out = Vec::with_capacity(self.query.len()); for c in self.query.chars() { if !REGEX_CHARS.contains(&c) { out.push(c); } } out } /// Returns a real regex expression which will be used to match words fn convert_regex(query: &str) -> String { let mut out = String::with_capacity(query.len() + 2); out.push('^'); out.push_str( &query .replace('*', ".*") .replace('?', ".{1}") .replace('+', ".{1}"), ); if !out.ends_with('$') { out.push('$'); } out } /// Returns `true` if query can be interpreted as regex query #[inline] fn is_regex(query: &str) -> bool { let query = adjust_regex(query); query.contains('*') || query.contains('+') || query.contains('?') } /// Get a reference to the regex squery's query. 
pub fn query(&self) -> &str { self.query.as_ref() } } /// Adjusts the query to a consistent format #[inline] fn adjust_regex(query: &str) -> String { query .replace('*', "*") .replace('+', "+") .replace('?', "?") } impl Hash for RegexSQuery { #[inline] fn hash(&self, state: &mut H) { self.query.hash(state); } } impl PartialEq for RegexSQuery { #[inline] fn eq(&self, other: &Self) -> bool { self.query == other.query } } impl Eq for RegexSQuery {} ================================================ FILE: lib/search/src/query/tags.rs ================================================ use types::jotoba::{ search::SearchTarget, sentences, words::{misc::Misc, part_of_speech::PosSimple}, }; /// Hashtag based search tags #[derive(Debug, Clone, Copy, PartialEq, Hash)] pub enum Tag { // Producer tags PartOfSpeech(PosSimple), Misc(Misc), Jlpt(u8), GenkiLesson(u8), Katakana, SentenceTag(sentences::Tag), IrregularIruEru, // Non producer SearchType(SearchTarget), Hidden, } impl Tag { /// Returns true if the tag can be used without a query #[inline] pub fn is_producer(&self) -> bool { !self.is_search_type() && !self.is_hidden() } /// Returns `true` if the tag is [`SearchType`]. #[inline] pub fn is_search_type(&self) -> bool { matches!(self, Self::SearchType(..)) } /// Returns `true` if the tag is [`PartOfSpeech`]. #[inline] pub fn is_part_of_speech(&self) -> bool { matches!(self, Self::PartOfSpeech(..)) } #[inline] pub fn as_search_type(&self) -> Option<&SearchTarget> { if let Self::SearchType(v) = self { Some(v) } else { None } } #[inline] pub fn as_part_of_speech(&self) -> Option<&PosSimple> { if let Self::PartOfSpeech(v) = self { Some(v) } else { None } } /// Returns `true` if the tag is [`Misc`]. /// /// [`Misc`]: Tag::Misc #[inline] pub fn is_misc(&self) -> bool { matches!(self, Self::Misc(..)) } #[inline] pub fn as_misc(&self) -> Option<&Misc> { if let Self::Misc(v) = self { Some(v) } else { None } } /// Returns `true` if the tag is [`Jlpt`]. 
/// /// [`Jlpt`]: Tag::Jlpt #[inline] pub fn is_jlpt(&self) -> bool { matches!(self, Self::Jlpt(..)) } #[inline] pub fn as_jlpt(&self) -> Option { if let Self::Jlpt(v) = self { Some(*v) } else { None } } /// Returns `true` if the tag is [`GenkiLesson`]. /// /// [`GenkiLesson`]: Tag::GenkiLesson #[inline] pub fn is_genki_lesson(&self) -> bool { matches!(self, Self::GenkiLesson(..)) } #[inline] pub fn as_genki_lesson(&self) -> Option { if let Self::GenkiLesson(v) = self { Some(*v) } else { None } } /// Returns `true` if the tag is [`IrregularIruEru`]. /// /// [`IrregularIruEru`]: Tag::IrregularIruEru pub fn is_irregular_iru_eru(&self) -> bool { matches!(self, Self::IrregularIruEru) } /// Returns `true` if the tag is [`Hidden`]. /// /// [`Hidden`]: Tag::Hidden #[must_use] pub fn is_hidden(&self) -> bool { matches!(self, Self::Hidden) } /// Returns `true` if the tag is [`SentenceTag`]. /// /// [`SentenceTag`]: Tag::SentenceTag #[must_use] #[inline] pub fn is_sentence_tag(&self) -> bool { matches!(self, Self::SentenceTag(..)) } #[inline] pub fn as_sentence_tag(&self) -> Option<&sentences::Tag> { if let Self::SentenceTag(v) = self { Some(v) } else { None } } /// Returns `true` if the tag is [`Katakana`]. /// /// [`Katakana`]: Tag::Katakana #[must_use] pub fn is_katakana(&self) -> bool { matches!(self, Self::Katakana) } } ================================================ FILE: lib/search/src/query/user_settings.rs ================================================ use std::hash::{Hash, Hasher}; use types::jotoba::language::{LangParam, Language}; /// In-cookie saved personalized settings by an user #[derive(Debug, Clone, Copy)] pub struct UserSettings { pub user_lang: Language, pub page_lang: localization::language::Language, pub show_english: bool, pub english_on_top: bool, pub page_size: u32, pub show_example_sentences: bool, pub sentence_furigana: bool, } impl UserSettings { /// Returns `true` if an action has to be done for english too. 
This /// is the case if the user wants to see English results as well but /// did not set English as the main language
impl<'a> Search<'a> { #[inline] pub fn new(query: &'a str, lang: Language) -> Self { Self { query, lang } } /// Does a kana word-search and returns some likely radicals for the given query #[inline] pub fn run(&self) -> HashSet { let mut search_task = self.search_task(); self.select_kanji(search_task.find()) } #[inline] fn search_task(&self) -> SearchTask<'static, Engine> { SearchTask::with_language(&self.query, self.lang) .with_custom_order(ForeignOrder::new()) .with_limit(WORD_LIMIT) } fn select_kanji(&self, res: SearchResult<&Word>) -> HashSet { let kanji_retr = resources::get().kanji(); res.into_iter() .filter(|word| word.get_reading().reading == self.query) .map(|word| word.get_reading().reading.chars().filter(|i| i.is_kanji())) .flatten() .filter_map(|kanji| kanji_retr.by_literal(kanji).map(|i| &i.parts)) .flatten() .copied() .take(10) .collect() } } ================================================ FILE: lib/search/src/radical/word/mod.rs ================================================ pub mod foreign; pub mod romaji; pub use foreign::Search as ForeignSearch; pub use romaji::Search as RomajiSearch; ================================================ FILE: lib/search/src/radical/word/romaji.rs ================================================ use crate::{engine::words::native::Engine, word::order::native::NativeOrder}; use engine::{result::SearchResult, task::SearchTask}; use jp_utils::JapaneseExt; use std::collections::HashSet; use types::jotoba::words::Word; /// Amount of words to return in a search for radicals const WORD_LIMIT: usize = 3; /// Search for radicals in words by a foreign query pub struct Search<'a> { query: &'a str, } impl<'a> Search<'a> { #[inline] pub fn new(query: &'a str) -> Self { Self { query } } /// Does a kana word-search and returns some likely radicals for the given query #[inline] pub fn run(&self) -> HashSet { let mut search_task = self.search_task(); self.select_kanji(search_task.find()) } #[inline] fn search_task(&self) -> 
SearchTask<'static, Engine> { SearchTask::new(&self.query) .with_limit(WORD_LIMIT) .with_threshold(0.8) .with_custom_order(NativeOrder::new(self.query.to_string())) } fn select_kanji(&self, res: SearchResult<&Word>) -> HashSet { let kanji_retr = resources::get().kanji(); res.into_iter() .map(|word| word.get_reading().reading.chars().filter(|i| i.is_kanji())) .flatten() .filter_map(|kanji| kanji_retr.by_literal(kanji).map(|i| &i.parts)) .flatten() .copied() .take(10) .collect() } } ================================================ FILE: lib/search/src/sentence/mod.rs ================================================ pub mod order; mod producer; pub mod result; use super::query::Query; use crate::{ executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable}, query::Tag, }; use producer::{ foreign::ForeignProducer, native::NativeProducer, sequence::SequenceProducer, tag::TagProducer, }; use result::ResData; use types::jotoba::{language::Language, sentences::Sentence}; pub struct Search<'a> { query: &'a Query, producer: Vec + 'a>>, } impl<'a> Search<'a> { pub fn new(query: &'a Query) -> Self { let mut producer: Vec>> = vec![ Box::new(SequenceProducer::new(query)), Box::new(ForeignProducer::new(query, query.lang())), Box::new(TagProducer::new(query)), Box::new(NativeProducer::new(query, query.lang())), ]; if query.lang() != Language::English && query.show_english() { producer.push(Box::new(ForeignProducer::new(query, Language::English))); producer.push(Box::new(NativeProducer::new(query, Language::English))); } Self { query, producer } } } impl<'a> Searchable for Search<'a> { type ResAdd = ResData; type OutItem = result::Sentence; type Item = &'static Sentence; fn get_producer<'s>(&'s self) -> &Vec + 's>> { &self.producer } fn mod_output(&self, out: &mut OutputBuilder) { out.output_add = ResData::new(self.query.has_tag(Tag::Hidden)); } #[inline] fn to_output_item(&self, item: Self::Item) -> Self::OutItem { result::Sentence::from_m_sentence(item, 
self.query.lang_param()).unwrap() } fn get_query(&self) -> &Query { self.query } #[inline] fn filter(&self, item: &Self::Item) -> bool { !producer::filter::filter_sentence(self.query, item) } #[inline] fn max_top_dist(&self) -> Option { Some(0.9) //None } } ================================================ FILE: lib/search/src/sentence/order/foreign.rs ================================================ use engine::relevance::{data::SortData, RelevanceEngine}; use sparse_vec::SpVec32; use types::jotoba::{language::Language, sentences::Sentence}; use vsm::doc_vec::DocVector; pub struct ForeignOrder { lang: Language, } impl ForeignOrder { pub fn new(lang: Language) -> Self { Self { lang } } } impl RelevanceEngine for ForeignOrder { type OutItem = &'static Sentence; type IndexItem = DocVector; type Query = SpVec32; fn score<'item, 'query>( &self, item: &SortData<'item, 'query, Self::OutItem, Self::IndexItem, Self::Query>, ) -> f32 { let mut rel = item.vec_similarity(); if !item.item().has_translation(self.lang) { rel *= 0.8; } rel } } ================================================ FILE: lib/search/src/sentence/order/mod.rs ================================================ pub mod foreign; pub mod native; ================================================ FILE: lib/search/src/sentence/order/native.rs ================================================ use engine::relevance::{data::SortData, RelevanceEngine}; use sparse_vec::{SpVec32, VecExt}; use types::jotoba::{language::Language, sentences::Sentence}; use vsm::doc_vec::DocVector; pub const QUERY_WEIGHT: f32 = 100.0; pub struct NativeOrder { lang: Language, } impl NativeOrder { pub fn new(lang: Language) -> Self { Self { lang } } } impl RelevanceEngine for NativeOrder { type OutItem = &'static Sentence; type IndexItem = DocVector; type Query = SpVec32; #[inline] fn score<'item, 'query>( &self, item: &SortData<'item, 'query, Self::OutItem, Self::IndexItem, Self::Query>, ) -> f32 { //let mut rel = term_dist(item.query(), 
item.index_item().vec()); let mut rel = sim(item.query(), item.index_item().vec(), QUERY_WEIGHT); if !item.item().has_translation(self.lang) { rel *= 0.99; } rel } } /// Calculates a similar value to the cosine similarity between vec_a and vec_b but /// gives the length of vec_a more weight than vec_b's length. /// This prevents longer sentences being less relevant than short sentences, even if /// the longer sentences contains all terms of the query when the short sentence does not. #[inline] fn sim(vec_a: &SpVec32, vec_b: &SpVec32, a_weight: f32) -> f32 { if !vec_a.could_overlap(vec_b) { return 0.0; } let sc = vec_a.scalar(vec_b); let ldiff = ((vec_a.get_length() * a_weight) + vec_b.get_length()) / (a_weight + 1.0); sc / ldiff } ================================================ FILE: lib/search/src/sentence/producer/filter.rs ================================================ use super::kanji; use crate::{engine, query::Query}; use index_framework::traits::{backend::Backend, dictionary::IndexDictionary}; use jp_utils::JapaneseExt; use sparse_vec::VecExt; use types::jotoba::sentences::Sentence; use vsm::doc_vec::DocVector; pub(crate) fn filter_sentence(query: &Query, sentence: &Sentence) -> bool { if sentence.get_translation(query.lang_param()).is_none() { return false; } if query.form.is_kanji_reading() { let kreading = query .form .as_kanji_reading() .and_then(|i| kanji::get_reading(i)) .unwrap(); return kanji::sentence_matches(sentence, &kreading); } if !query.must_contain.is_empty() { if !by_quot_marks(query, sentence) { return false; } } if !query .tags .iter() .filter_map(|i| i.as_sentence_tag()) .all(|tag| sentence.has_tag(tag)) { return false; } true } fn by_quot_marks(query: &Query, sentence: &Sentence) -> bool { if !by_quot_marks_jp(query, sentence) { return false; } // We're doing filtering for foreign words directly as search engine filter /* sentence .get_translation(query.lang(), query.show_english()) .map(|sentence| by_quot_marks_fe(query, sentence)) 
.unwrap_or(true) */ true } /* fn by_quot_marks_fe(query: &Query, sentence: &str) -> bool { let sentence = sentence.to_lowercase(); let sentence: Vec<_> = sentence.split(' ').collect(); let iter = query.must_contain.iter().filter(|i| !i.is_japanese()); for needle in iter { if !sentence.contains(&needle.as_str()) { return false; } } true } */ fn by_quot_marks_jp(query: &Query, sentence: &Sentence) -> bool { let jp_sentence = &sentence.japanese; let jp_terms = query.must_contain.iter().filter(|i| i.is_japanese()); for needle in jp_terms { let is_kana = needle.is_kana(); // If kana reading and kana contains needle if (is_kana && sentence.get_kana().contains(needle)) // Or full reading contains || (!is_kana && jp_sentence.contains(needle)) { continue; } return false; } true } /// Vector filter for Sentences filtering based on quoted terms pub struct FeQotTermsVecFilter { mc_terms: Vec, filter_all: bool, } impl FeQotTermsVecFilter { pub fn new(query: &Query) -> Self { // If there is a term that is not indexed and thus can't be found, // filter out all results let mut filter_all = false; let mut mc_terms = vec![]; let index = indexes::get().sentence().foreign(); let ix_dict = index.dict(); 'o: for t in query.must_contain.iter().filter(|i| !i.is_japanese()) { for term in engine::sentences::foreign::all_terms(t).into_iter() { if let Some(v) = ix_dict.get_id(&term) { mc_terms.push(v as u32); continue; } filter_all = true; mc_terms.clear(); break 'o; } } Self { mc_terms, filter_all, } } pub fn filter(&self, sentence: &DocVector) -> bool { if self.filter_all { return false; } if self.mc_terms.is_empty() { return true; } self.mc_terms .iter() .all(|dim| sentence.vec().has_dim(*dim as usize)) } } ================================================ FILE: lib/search/src/sentence/producer/foreign.rs ================================================ use super::filter::{self, FeQotTermsVecFilter}; use crate::{ engine::sentences::foreign, executor::{out_builder::OutputBuilder, 
producer::Producer, searchable::Searchable},
    query::{Query, QueryLang},
    sentence::{order::foreign::ForeignOrder, Search},
};
use engine::{pushable::FilteredMaxCounter, task::SearchTask};
use types::jotoba::language::Language;

/// Producer for sentences by foreign keywords
pub struct ForeignProducer<'a> {
    query: &'a Query,
    // Language this producer searches in (users language or English).
    language: Language,
}

impl<'a> ForeignProducer<'a> {
    pub fn new(query: &'a Query, language: Language) -> Self {
        Self { query, language }
    }

    /// Builds the search task: result filter, quoted-term vector filter and
    /// the foreign relevance order.
    fn task(&self) -> SearchTask<'static, foreign::Engine> {
        let query_str = &self.query.query_str;
        let query_c = self.query.clone();
        let vec_filter = FeQotTermsVecFilter::new(&self.query);
        let lang = self.query.lang();
        SearchTask::with_language(query_str, self.language)
            .with_result_filter(move |i| filter::filter_sentence(&query_c, *i))
            .with_item_filter(move |i| vec_filter.filter(i))
            .with_custom_order(ForeignOrder::new(lang))
    }
}

impl<'a> Producer for ForeignProducer<'a> {
    type Target = Search<'a>;

    fn produce(
        &self,
        // NOTE(review): extraction stripped the `<Self::Target as Searchable>`
        // qualified paths from this signature; restored to match the trait.
        out: &mut OutputBuilder<
            <Self::Target as Searchable>::Item,
            <Self::Target as Searchable>::ResAdd,
        >,
    ) {
        self.task().find_to(out);
    }

    fn should_run(&self, _already_found: usize) -> bool {
        self.query.form.is_normal() && self.query.q_lang == QueryLang::Foreign
    }

    fn estimate_to(&self, out: &mut FilteredMaxCounter<<Self::Target as Searchable>::Item>) {
        self.task().estimate_to(out);
    }
}

================================================
FILE: lib/search/src/sentence/producer/kanji.rs
================================================
use japanese::ToKanaExt;
use jp_utils::furi::{
    segment::{kanji::as_kanji::AsKanjiSegment, AsSegment},
    Furigana,
};
use sentence_reader::JA_NL_PARSER;
use types::jotoba::{
    kanji::reading::{Reading, ReadingSearch},
    sentences::Sentence,
};

/// Returns `true` if `sentence` contains the kanji literal read with `reading`.
pub(crate) fn sentence_matches(sentence: &Sentence, reading: &Reading) -> bool {
    let lit = reading.get_lit_str();

    if reading.is_full_reading() {
        let parsed_furi = Furigana(&sentence.furigana);
        let reading_hira = reading.get_raw().to_hiragana();

        for i in parsed_furi.segments() {
            let Some(curr_kanji) = i.as_kanji() else
{continue}; if !curr_kanji.literals().contains(&lit) { continue; } if i.get_kana_reading().to_hiragana().contains(&reading_hira) { return true; } } return false; } // Kunyomi let formatted = reading.format_reading_with_literal(); for morph in JA_NL_PARSER.get().unwrap().parse(&sentence.japanese) { let reading = morph.lexeme; if reading == formatted { return true; } } false } pub(crate) fn get_reading(reading: &ReadingSearch) -> Option { let kanji_storage = resources::get().kanji(); let kanji = kanji_storage.by_literal(reading.literal)?; let reading = kanji.find_reading(&reading.reading)?; Some(reading) } ================================================ FILE: lib/search/src/sentence/producer/mod.rs ================================================ pub mod filter; pub mod foreign; mod kanji; pub mod native; pub mod sequence; pub mod tag; ================================================ FILE: lib/search/src/sentence/producer/native.rs ================================================ use super::filter; use crate::{ engine::sentences::native, executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable}, query::{Query, QueryLang}, sentence::{order::native::NativeOrder, Search}, }; use engine::{pushable::FilteredMaxCounter, task::SearchTask}; use types::jotoba::language::Language; /// Producer for sentences by foreign keywords pub struct NativeProducer<'a> { query: &'a Query, lang: Language, } impl<'a> NativeProducer<'a> { pub fn new(query: &'a Query, lang: Language) -> Self { Self { query, lang } } fn task(&self) -> SearchTask<'static, native::Engine> { let query = self.query.clone(); let query_str = self.jp_reading(); SearchTask::with_language(&query_str, self.lang) .with_result_filter(move |sentence| filter::filter_sentence(&query, *sentence)) .with_custom_order(NativeOrder::new(self.query.lang())) } fn jp_reading(&self) -> String { let mut query_str = self.query.query_str.clone(); if let Some(kanji_reading) = self.query.form.as_kanji_reading() { 
query_str = kanji_reading.literal.to_string(); } query_str } } impl<'a> Producer for NativeProducer<'a> { type Target = Search<'a>; fn produce( &self, out: &mut OutputBuilder< ::Item, ::ResAdd, >, ) { self.task().find_to(out); } fn should_run(&self, _already_found: usize) -> bool { self.query.form.is_normal() && self.query.q_lang == QueryLang::Japanese } fn estimate_to(&self, out: &mut FilteredMaxCounter<::Item>) { self.task().estimate_to(out); } } ================================================ FILE: lib/search/src/sentence/producer/sequence.rs ================================================ use crate::{ executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable}, query::Query, sentence::Search, }; use engine::{ pushable::{FilteredMaxCounter, Pushable}, relevance::item::RelItem, }; use types::jotoba::sentences::Sentence; /// Producer for sentence by seq pub struct SequenceProducer<'a> { query: &'a Query, } impl<'a> SequenceProducer<'a> { pub fn new(query: &'a Query) -> Self { Self { query } } fn sentence(&self) -> Option<&'static Sentence> { let seq = self.query.form.as_sequence()?; resources::get().sentences().by_id(*seq) } } impl<'a> Producer for SequenceProducer<'a> { type Target = Search<'a>; fn produce( &self, out: &mut OutputBuilder< ::Item, ::ResAdd, >, ) { if let Some(s) = self.sentence() { out.push(RelItem::new(s, 0.0)); } } fn estimate_to(&self, out: &mut FilteredMaxCounter<::Item>) { if let Some(sentence) = self.sentence() { out.push(sentence); } } fn should_run(&self, _already_found: usize) -> bool { self.query.form.is_sequence() } } ================================================ FILE: lib/search/src/sentence/producer/tag.rs ================================================ use crate::{ executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable}, query::{Query, Tag}, sentence::Search, }; use engine::{ pushable::FilteredMaxCounter, pushable::{PushMod, Pushable}, relevance::item::RelItem, }; use 
types::jotoba::sentences::Sentence; /// Producer for Tags pub struct TagProducer<'a> { query: &'a Query, } impl<'a> TagProducer<'a> { pub fn new(query: &'a Query) -> Self { Self { query } } fn find_to

    (&self, out: &mut P) where P: Pushable>, { let tag = self .query .tags .iter() .filter(|i| i.is_jlpt() || i.is_sentence_tag()) .find(|i| i.is_producer()) .unwrap(); self.push_tag(tag, out); } pub fn push_tag

    (&self, tag: &Tag, out: &mut P) where P: Pushable>, { let s_res = resources::get().sentences(); match tag { Tag::SentenceTag(sentence_tag) => self.push_iter(s_res.by_tag(sentence_tag), out), Tag::Jlpt(jlpt) => self.push_iter(s_res.by_jlpt(*jlpt), out), _ => (), } } fn push_iter(&self, iter: I, out: &mut P) where P: Pushable>, I: Iterator, { let mut c = 0; for w in iter { let item = RelItem::new(w, c as f32); if out.push(item) { c += 1; if c >= 1000 { break; } } } } } impl<'a> Producer for TagProducer<'a> { type Target = Search<'a>; fn produce( &self, out: &mut OutputBuilder< ::Item, ::ResAdd, >, ) { self.find_to(out); } fn estimate_to(&self, out: &mut FilteredMaxCounter<::Item>) { let mut m = PushMod::new(out, |i: RelItem<&Sentence>| i.item); self.find_to(&mut m); } fn should_run(&self, _already_found: usize) -> bool { self.query.query_str.is_empty() && self .query .tags .iter() // Only run for jlpt and sentence tags .filter(|i| i.is_jlpt() || i.is_sentence_tag()) .any(|i| i.is_producer()) } } ================================================ FILE: lib/search/src/sentence/result.rs ================================================ use jp_utils::furi::{segment::SegmentRef, seq::FuriSequence}; use types::jotoba::language::{param::AsLangParam, Language}; use crate::executor::out_builder::OutputAddable; /// Additional result data for a sentence search #[derive(Clone, Copy, Default, Debug)] pub struct ResData { pub hidden: bool, } impl ResData { pub fn new(hidden: bool) -> Self { Self { hidden } } } impl OutputAddable for ResData {} /// A displayable sentence #[derive(Clone, Debug)] pub struct Sentence { pub id: u32, pub content: &'static str, pub furigana: &'static str, pub translation: &'static str, pub language: Language, pub eng: Option, } impl Sentence { #[inline] pub fn furigana_pairs<'a>(&'a self) -> Vec> { // Can unwrap here since we check and fix all sentences at preprocessing. 
FuriSequence::parse_ref(self.furigana).unwrap().into_parts() } #[inline] pub fn get_english(&self) -> Option<&str> { self.eng.as_deref() } #[inline] pub fn from_m_sentence( s: &'static types::jotoba::sentences::Sentence, lang: impl AsLangParam, ) -> Option { let translation = s.get_translation(lang)?; Some(Self { id: s.id, translation, content: &s.japanese, furigana: &s.furigana, eng: None, language: lang.as_lang().language(), }) } } ================================================ FILE: lib/search/src/word/filter.rs ================================================ use crate::query::Query; use jp_utils::JapaneseExt; use std::borrow::Borrow; use types::jotoba::words::Word; pub struct WordFilter { query: Query, jlpt_lvl: Option, } impl WordFilter { pub fn new(query: Query) -> Self { let jlpt_lvl = query.tags.iter().find_map(|i| i.as_jlpt()); Self { query, jlpt_lvl } } /// Returns `true` for all words the query has a filter for aka if the word should be filtered out of the results #[inline] pub fn filter_word>(&self, word: W) -> bool { #[inline] fn inner(wf: &WordFilter, word: &Word) -> Option<()> { wf.by_misc_tags(word)?; wf.by_language(word)?; wf.by_pos_tags(word)?; wf.by_jlpt(word)?; wf.by_katakana_tag(word)?; wf.by_quot_marks(word)?; Some(()) } inner(self, word.borrow()).is_none() } #[inline] fn by_language(&self, w: &Word) -> Option<()> { w.has_language(self.query.lang_param()).then(|| ()) } #[inline] fn by_katakana_tag(&self, w: &Word) -> Option<()> { let has_tag = self.query.has_tag(crate::query::Tag::Katakana); (!has_tag || w.get_reading_str().is_katakana()).then(|| ()) } #[inline] fn by_jlpt(&self, w: &Word) -> Option<()> { // Ignore if not set if self.jlpt_lvl.is_none() { return Some(()); } (w.get_jlpt_lvl() == self.jlpt_lvl).then(|| ()) } #[inline] fn by_pos_tags(&self, w: &Word) -> Option<()> { w.has_all_pos_iter(self.query.get_part_of_speech_tags()) .then(|| ()) } #[inline] fn by_misc_tags(&self, w: &Word) -> Option<()> { self.query .get_misc_tags() 
.all(|mt| w.has_misc(mt)) .then(|| ()) } fn by_quot_marks(&self, w: &Word) -> Option<()> { if self.query.must_contain.is_empty() { return Some(()); } let (jp_q_terms, mut fn_q_terms): (Vec<_>, Vec<_>) = self .query .must_contain .iter() .partition(|i| i.is_japanese()); if !fn_q_terms.is_empty() { for i in w.gloss_iter_by_lang(self.query.lang_param()) { let i = i.to_lowercase(); fn_q_terms.retain(|k| !i.contains(k.as_str())); if fn_q_terms.is_empty() { break; } } } if !jp_q_terms.is_empty() { for term in jp_q_terms { self.by_quot_marks_jp(w, &term)?; } } // Success if all quted terms were removed fn_q_terms.is_empty().then(|| ()) } #[inline] fn by_quot_marks_jp(&self, w: &Word, q_term: &str) -> Option<()> { if q_term.is_kana() { if !w.get_kana().contains(q_term) { return None; } } else if !w.reading_iter(false).any(|i| i.reading.contains(q_term)) { return None; } Some(()) } } ================================================ FILE: lib/search/src/word/kanji.rs ================================================ use itertools::Itertools; use jp_utils::{alphabet::Alphabet, tokenize::words_with_alphabet}; use types::jotoba::{kanji::Kanji, words::Word}; /// Retrieves all (up to 10) kanji for words in correct order without duplicates pub fn load_word_kanji_info(words: &[Word]) -> Vec { let kanji_resources = resources::get().kanji(); words .iter() .filter_map(|i| { let kanji = &i.reading.kanji.as_ref()?.reading; Some(words_with_alphabet(&kanji, Alphabet::Kanji)) }) .flatten() .map(|i| i.chars().collect::>()) .flatten() .filter_map(|i| kanji_resources.by_literal(i).cloned()) .unique_by(|i| i.literal) .take(10) .collect() } ================================================ FILE: lib/search/src/word/mod.rs ================================================ pub mod filter; pub mod kanji; pub mod order; pub mod producer; pub mod result; use crate::{ executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable}, query::Query, }; use types::jotoba::words::Word; use 
filter::WordFilter; use producer::{ foreign::{romaji::RomajiProducer, ForeignProducer}, japanese::{number::NumberProducer, sentence_reader::SReaderProducer, NativeProducer}, k_reading::KReadingProducer, regex::RegexProducer, sequence::SeqProducer, tag::TagProducer, }; /// Word search pub struct Search<'a> { query: &'a Query, producer: Vec + 'a>>, filter: WordFilter, } impl<'a> Search<'a> { pub fn new(query: &'a Query) -> Self { let producer: Vec>> = vec![ Box::new(KReadingProducer::new(query)), Box::new(TagProducer::new(query)), Box::new(SeqProducer::new(query)), Box::new(RegexProducer::new(query)), Box::new(SReaderProducer::new(query)), Box::new(NativeProducer::new(query)), Box::new(ForeignProducer::new(query)), Box::new(RomajiProducer::new(query)), Box::new(NumberProducer::new(query)), ]; let filter = WordFilter::new(query.clone()); Self { query, producer, filter, } } } impl<'a> Searchable for Search<'a> { type Item = &'static Word; type OutItem = Word; type ResAdd = result::AddResData; fn get_producer<'s>(&'s self) -> &Vec + 's>> { &self.producer } fn get_query(&self) -> &Query { self.query } fn mod_output(&self, out: &mut OutputBuilder) { if out.output_add.raw_query.is_empty() { out.output_add.raw_query = self.query.raw_query.clone(); } } #[inline] fn to_output_item(&self, item: Self::Item) -> Self::OutItem { let mut item = item.to_owned(); item.adjust_language(self.query.lang_param()); item } #[inline] fn filter(&self, word: &Self::Item) -> bool { self.filter.filter_word(*word) } #[inline] fn max_top_dist(&self) -> Option { if !max_top_dist_filter(&self.query) { return None; } //Some(2.0) None } } #[inline] fn max_top_dist_filter(query: &Query) -> bool { !query.is_regex() && query.form.is_normal() } ================================================ FILE: lib/search/src/word/order/foreign.rs ================================================ use super::REMOVE_PARENTHESES; use engine::relevance::{data::SortData, RelevanceEngine}; use indexes::ng_freq::{term_dist, 
NgFreqIndex}; use sparse_vec::{SpVec32, VecExt}; use types::jotoba::{ language::{LangParam, Language}, words::Word, }; use vsm::doc_vec::DocVector; pub struct ForeignOrder { query_vec_lang: SpVec32, query_vec_en: Option, lang: Language, } impl ForeignOrder { #[inline] pub fn new() -> Self { Self { query_vec_lang: SpVec32::default(), query_vec_en: None, lang: Language::English, } } #[inline] fn get_query_vec(&self, lang: Language) -> &SpVec32 { if lang == self.lang { &self.query_vec_lang } else if lang == Language::English { // If `lang` is english and not the users lang, `query_vec_en` is always set self.query_vec_en.as_ref().unwrap() } else { // There are only search tasks for users language or english. So the query vector has // to be `query_vec_lang` in case `lang` is the users language, or `query_vec_en` if // the language is english. If there are other search requests, this code must be // adjusted log::error!("Unreachable"); unreachable!() } } #[inline] fn text_sim(&self, word: &Word, lang: Language) -> f32 { let dist = |i: &str| -> f32 { let fmt = REMOVE_PARENTHESES.replace_all(i, "").trim().to_lowercase(); if fmt.is_empty() { return 0.0; } let vec = build_vec(get_ng_index(lang), &fmt); term_dist(self.get_query_vec(lang), &vec) }; word.gloss_iter_by_lang(LangParam::new(lang)) .map(|i| dist(i)) .chain( self.query_vec_en .iter() .map(|_| word.gloss_iter_by_lang(Language::English).map(|i| dist(i))) .flatten(), ) .max_by(|a, b| a.total_cmp(&b)) .unwrap_or(0.0) } } impl RelevanceEngine for ForeignOrder { type OutItem = &'static Word; type IndexItem = DocVector; type Query = SpVec32; #[inline] fn score<'item, 'query>( &self, item: &SortData<'item, 'query, Self::OutItem, Self::IndexItem, Self::Query>, ) -> f32 { let word = item.item(); let lang = item.language().unwrap_or(Language::English); let text_sim = self.text_sim(word, lang); let mut rel_add = 0.0; if text_sim >= 0.5 { let index_item = item.index_item().vec(); let gloss_sim = item.query().scalar(index_item); 
rel_add += gloss_sim * 100.0; } (rel_add + text_sim) / 2.0 } fn init(&mut self, init: engine::relevance::RelEngineInit) { let lang = init.language.unwrap(); let query = init.query.to_lowercase(); self.query_vec_lang = build_vec(get_ng_index(lang), &query); if lang != Language::English { self.query_vec_en = Some(build_vec(get_ng_index(Language::English), &query)); } self.lang = lang; } } #[inline] fn get_ng_index(lang: Language) -> &'static NgFreqIndex { indexes::get().word().foreign(lang).unwrap().ng_index() } #[inline] pub fn build_vec(index: &NgFreqIndex, term: &str) -> SpVec32 { index.build_custom_vec(term, |_freq, _tot| 1.0) } ================================================ FILE: lib/search/src/word/order/kanji_reading.rs ================================================ use engine::relevance::RelevanceEngine; use types::jotoba::words::Word; pub struct KanjiReadingRelevance; impl RelevanceEngine for KanjiReadingRelevance { type OutItem = &'static Word; type IndexItem = u32; type Query = String; #[inline] fn score<'item, 'query>( &self, item: &engine::relevance::data::SortData< 'item, 'query, Self::OutItem, Self::IndexItem, Self::Query, >, ) -> f32 { let word = item.item(); let mut score: f32 = 0.0; if word.is_common() { score += 100.0; } if let Some(jlpt) = word.get_jlpt_lvl() { score += jlpt as f32 * 10.0; } if score == 0.0 { // Show shorter words on top if they aren't important let reading_len = word.reading.get_reading().reading.chars().count(); //score = 100usize.saturating_sub(reading_len * 2); score = (0f32).max(100.0 - reading_len as f32 * 2.0); } else { score += 100.0; } score } } ================================================ FILE: lib/search/src/word/order/mod.rs ================================================ pub mod foreign; pub mod kanji_reading; pub mod native; pub mod regex; use once_cell::sync::Lazy; /// A Regex matching parentheses and its contents pub(crate) static REMOVE_PARENTHESES: Lazy<::regex::Regex> = Lazy::new(|| 
::regex::Regex::new("\\(.*\\)").unwrap()); ================================================ FILE: lib/search/src/word/order/native.rs ================================================ use engine::relevance::{data::SortData, RelevanceEngine}; use indexes::ng_freq::{term_dist, NgFreqIndex}; use japanese::ToKanaExt; use jp_utils::JapaneseExt; use ngindex::{item::IndexItem, termset::TermSet}; use sparse_vec::{SpVec32, VecExt}; use types::jotoba::words::Word; pub struct NativeOrder { orig_query: String, orig_query_ts: Option, query_hw: String, /// Word index in sentence reader w_index: Option, query_vec: SpVec32, } impl NativeOrder { #[inline] pub fn new(orig_query: String) -> Self { Self { orig_query, orig_query_ts: None, w_index: None, query_vec: SpVec32::empty(), query_hw: String::new(), } } /// Set a custom sentence reader word index pub fn with_w_index(mut self, index: usize) -> Self { self.w_index = Some(index); self } pub fn with_oquery_ts(mut self, ts: TermSet) -> Self { self.orig_query_ts = Some(ts); self } #[inline] fn exceeded_threshold<'i, 'q, A, B, C>(item: &SortData<'i, 'q, A, B, C>, score: f32) -> bool { item.threshold().map(|th| score < th).unwrap_or(false) } #[inline] fn text_sim(&self, word: &Word) -> f32 { word.reading_iter(true) .map(|i| self.reading_sim(&i.reading.to_halfwidth().to_hiragana())) .max_by(|a, b| a.total_cmp(b)) .unwrap_or(0.0) } #[inline] fn reading_sim(&self, reading: &str) -> f32 { let vec = build_ng_vec(reading); term_dist(&vec, &self.query_vec) } } impl RelevanceEngine for NativeOrder { type OutItem = &'static Word; type IndexItem = IndexItem; type Query = TermSet; fn score<'item, 'query>( &self, item: &SortData<'item, 'query, Self::OutItem, Self::IndexItem, Self::Query>, ) -> f32 { let word = item.item(); let mut score = item.index_item().dice(item.query()); // If alternative reading matches query exactly if Self::exceeded_threshold(item, score) { return 0.0; } score *= self.text_sim(word); if let Some(ref o_ts) = 
self.orig_query_ts { if self.w_index.unwrap_or(0) == 0 { let new = item.index_item().dice(o_ts); if new > score { score = new; } else { score *= 0.7; } } } if Self::exceeded_threshold(item, score) { return 0.0; } let kana = word.reading.kana.reading.to_halfwidth().to_hiragana(); // Words with query as substring have more relevance // スイス: スイス人 > スパイス if !kana.contains(&self.query_hw) { //score *= 0.8; } if Self::exceeded_threshold(item, score) { return 0.0; } if kana != self.orig_query && word.get_reading().reading.to_halfwidth() != self.orig_query { score *= 0.7; } if Self::exceeded_threshold(item, score) { return 0.0; } if word.jlpt_lvl.is_none() { score *= 0.999; } // Is common if !word.is_common() { score *= 0.999; } //let reading_len = utils::real_string_len(&reading); /* if reading_len == 1 && reading.is_kanji() { let kanji = reading.chars().next().unwrap(); let norm = indexes::get() .kanji() .reading_freq() .norm_reading_freq(kanji, word.get_kana()); if let Some(_read_freq) = norm { //score += read_freq; } } */ score } fn init(&mut self, init: engine::relevance::RelEngineInit) { self.query_vec = build_ng_vec(&init.query.to_halfwidth().to_hiragana()); self.query_hw = init.query.to_halfwidth().to_hiragana(); } } #[inline] fn ng_freq_index() -> &'static NgFreqIndex { indexes::get().word().native().tf_index() } #[inline] fn build_ng_vec(term: &str) -> SpVec32 { ng_freq_index().build_custom_vec(term, |freq, tot| (tot / freq).log2()) } ================================================ FILE: lib/search/src/word/order/regex.rs ================================================ use crate::query::regex::RegexSQuery; use types::jotoba::words::Word; use utils::real_string_len; /// Order for regex-search results pub fn regex_order(word: &Word, found_in: &str, _query: &RegexSQuery) -> usize { let mut score: usize = 100; if !word .reading .alternative .iter() .any(|i| i.reading == found_in) { score += 20; } if word.is_common() { score += 30; } if let Some(jlpt) = 
word.get_jlpt_lvl() { score += 10 + (jlpt * 2) as usize; } // Show shorter words more on top score = score.saturating_sub(real_string_len(&word.get_reading().reading) * 3); score } ================================================ FILE: lib/search/src/word/producer/foreign/mod.rs ================================================ pub mod romaji; pub mod task; use crate::{ executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable}, query::{Query, QueryLang}, word::Search, }; use engine::pushable::FilteredMaxCounter; use task::ForeignSearch; use types::jotoba::language::Language; /// Producer for words by foreign query pub struct ForeignProducer<'a> { query: &'a Query, } impl<'a> ForeignProducer<'a> { pub fn new(query: &'a Query) -> Self { Self { query } } } impl<'a> Producer for ForeignProducer<'a> { type Target = Search<'a>; fn produce( &self, out: &mut OutputBuilder< ::Item, ::ResAdd, >, ) { // convert WordOutput -> Word //let mut p_mod = PushMod::new(out, |i: RelItem| i.map_item(|i| i.word)); let q_str = &self.query.query_str; let lang = self.query.get_search_lang(); ForeignSearch::new(self.query, q_str, lang) .task() .find_to(out); // Add english results if lang != Language::English && self.query.show_english() { ForeignSearch::new(self.query, q_str, Language::English) .task() .find_to(out); } } fn estimate_to(&self, out: &mut FilteredMaxCounter<::Item>) { let q_str = &self.query.query_str; let lang = self.query.get_search_lang(); ForeignSearch::new(self.query, q_str, lang) .task() .estimate_to(out); // Add english results if lang != Language::English && self.query.show_english() { ForeignSearch::new(self.query, q_str, Language::English) .task() .estimate_to(out); } } fn should_run(&self, _already_found: usize) -> bool { self.query.q_lang == QueryLang::Foreign && !self.query.query_str.is_empty() } } ================================================ FILE: lib/search/src/word/producer/foreign/romaji.rs 
================================================ use japanese::guessing::could_be_romaji; use crate::{ engine::words::native::Engine, executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable}, query::{Query, QueryLang}, word::{producer::japanese::task::NativeSearch, Search}, }; use engine::{pushable::FilteredMaxCounter, task::SearchTask}; pub struct RomajiProducer<'a> { query: &'a Query, } impl<'a> RomajiProducer<'a> { pub fn new(query: &'a Query) -> Self { Self { query } } fn hira_query(&self) -> String { japanese::to_hira_fmt(&self.query.query_str) } fn kk_query(&self) -> String { japanese::to_kk_fmt(&self.query.query_str) } fn kk_task(&self) -> SearchTask<'static, Engine> { let hira_query_str = self.kk_query(); NativeSearch::new(self.query, &hira_query_str).task() } fn hira_task(&self) -> SearchTask<'static, Engine> { let hira_query_str = self.hira_query(); NativeSearch::new(self.query, &hira_query_str) .with_custom_original_query(&hira_query_str) .task() } } impl<'a> Producer for RomajiProducer<'a> { type Target = Search<'a>; fn produce( &self, out: &mut OutputBuilder< ::Item, ::ResAdd, >, ) { self.hira_task().find_to(out); self.kk_task().find_to(out); } fn estimate_to(&self, out: &mut FilteredMaxCounter<::Item>) { self.hira_task().estimate_to(out); self.kk_task().estimate_to(out); } fn should_run(&self, already_found: usize) -> bool { already_found < 100 // Don't run on jp input && self.query.q_lang == QueryLang::Foreign && could_be_romaji(&self.query.query_str) } } ================================================ FILE: lib/search/src/word/producer/foreign/task.rs ================================================ use engine::task::SearchTask; use types::jotoba::language::Language; use crate::{ engine::words::foreign::Engine, query::Query, word::{filter::WordFilter, order::foreign::ForeignOrder}, }; /// Helper for creating SearchTask for foreign queries pub struct ForeignSearch<'a> { query: &'a Query, query_str: &'a str, language: 
Language, } impl<'a> ForeignSearch<'a> { pub(crate) fn new(query: &'a Query, query_str: &'a str, language: Language) -> Self { Self { query, query_str, language, } } pub fn task(&self) -> SearchTask<'static, Engine> { let filter = WordFilter::new(self.query.clone()); SearchTask::with_language(self.query_str, self.language) .with_custom_order(ForeignOrder::new()) .with_result_filter(move |item| !filter.filter_word(*item)) } } ================================================ FILE: lib/search/src/word/producer/japanese/mod.rs ================================================ pub mod number; pub mod sentence_reader; pub mod task; use crate::{ engine::words::native::Engine, executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable}, query::{Query, QueryLang}, word::Search, }; use engine::{pushable::FilteredMaxCounter, task::SearchTask}; use task::NativeSearch; /// Produces search results for native search input pub struct NativeProducer<'a> { query: &'a Query, } impl<'a> NativeProducer<'a> { pub fn new(query: &'a Query) -> Self { Self { query } } fn task(&self) -> SearchTask<'static, Engine> { NativeSearch::new(self.query, &self.query.query_str).task() } } impl<'a> Producer for NativeProducer<'a> { type Target = Search<'a>; fn produce( &self, out: &mut OutputBuilder< ::Item, ::ResAdd, >, ) { self.task().find_to(out); } fn estimate_to(&self, out: &mut FilteredMaxCounter<::Item>) { self.task().estimate_to(out) } fn should_run(&self, already_found: usize) -> bool { if self.query.q_lang != QueryLang::Japanese || self.query.query_str.is_empty() || self.query.form.is_kanji_reading() { return false; } already_found < 5 } } ================================================ FILE: lib/search/src/word/producer/japanese/number.rs ================================================ use crate::{ executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable}, query::Query, word::Search, }; use engine::pushable::FilteredMaxCounter; use 
japanese_number_parser::JapaneseNumberFormatter; use jp_utils::JapaneseExt; use log::debug; /// Produces a number if the query is a Japanese number pub struct NumberProducer<'a> { query: &'a Query, } impl<'a> NumberProducer<'a> { #[inline] pub fn new(query: &'a Query) -> Self { Self { query } } } impl<'a> Producer for NumberProducer<'a> { type Target = Search<'a>; fn produce( &self, out: &mut OutputBuilder< ::Item, ::ResAdd, >, ) { let query = &self.query.query_str; if let Some(number) = JapaneseNumberFormatter::new().format(&query) { debug!("Found number: {number:?}"); out.output_add.number = Some(number); } } fn estimate_to(&self, _out: &mut FilteredMaxCounter<::Item>) {} fn should_run(&self, _already_found: usize) -> bool { let query_str = &self.query.query_str; !query_str.is_empty() // Don't parse if query is a regular number && query_str .to_halfwidth() .parse::() .is_err() } } ================================================ FILE: lib/search/src/word/producer/japanese/sentence_reader.rs ================================================ use engine::{ pushable::FilteredMaxCounter, relevance::{data::SortData, RelevanceEngine}, task::SearchTask, }; use jp_utils::{ furi::segment::{AsSegment, SegmentRef}, JapaneseExt, }; use ngindex::{item::IndexItem, termset::TermSet}; use sentence_reader::{output::ParseResult, Parser, Part, Sentence}; use types::jotoba::words::{part_of_speech::PosSimple, Word}; use crate::{ engine::{names, words::native::Engine}, executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable}, query::{Query, QueryLang}, word::{ order::native::NativeOrder, result::{InflectionInformation, SentenceInfo}, Search, }, }; use super::task::NativeSearch; /// Producer for sentence reader and inflection information pub struct SReaderProducer<'a> { query: &'a Query, parsed: ParseResult, } impl<'a> SReaderProducer<'a> { pub fn new(query: &'a Query) -> Self { let parsed = Parser::new(&query.query_str).parse(); Self { query, parsed } } /// 
    /// Search task for inflected word
    // NOTE(review): the return type's generic arguments (`Option>`) and the
    // `::make_query` receiver were lost in extraction; confirm against repo.
    fn infl_task(&self) -> Option> {
        let infl = self.parsed.as_inflected_word()?;
        let normalized = infl.get_normalized();
        let original_query = ::make_query(&self.query.query_str, None)?;
        let search = NativeSearch::new(self.query, &normalized);
        let o_query = search.original_query().to_string();
        let order = NativeOrder::new(o_query).with_oquery_ts(original_query);
        Some(search.task().with_custom_order(order))
    }

    /// Selected word index within the sentence
    // NOTE(review): `word_count() - 1` would underflow for an empty sentence —
    // presumably a parsed sentence always has at least one part; confirm.
    #[inline]
    fn sentence_index(&self) -> usize {
        self.parsed
            .as_sentence()
            .map(|s| self.query.word_index.clamp(0, s.word_count() - 1))
            .unwrap_or(0)
    }

    /// Selected word in the sentence
    #[inline]
    fn sentence_word(&self) -> Option<&Part> {
        let sentence = self.parsed.as_sentence()?;
        let index = self.sentence_index();
        sentence.get_at(index)
    }

    /// Normalized search task for sentences
    fn snt_task_normalized(&self) -> Option> {
        let word = self.sentence_word().unwrap();
        let inflected = word.get_inflected();
        let normalized = word.get_normalized();
        let search = NativeSearch::new(self.query, &normalized);
        let order = NativeOrder::new(inflected).with_w_index(self.sentence_index());
        Some(search.task().with_custom_order(order))
    }

    /// Inflected search task for an inflected word in a sentence
    fn snt_task_infl(&self) -> Option> {
        let word = self.sentence_word().unwrap();
        let inflected = word.get_inflected();
        let search = NativeSearch::new(self.query, &inflected);
        let o_query = search.original_query().to_string();
        let order = NativeOrder::new(o_query).with_w_index(self.sentence_index());
        Some(search.task().with_custom_order(order))
    }
}

impl<'a> Producer for SReaderProducer<'a> {
    type Target = Search<'a>;

    /// Pushes results for either a single inflected word or a full sentence,
    /// attaching inflection/sentence info to the output's additional data.
    fn produce( &self, out: &mut OutputBuilder< ::Item, ::ResAdd, >, ) {
        if let ParseResult::InflectedWord(infl) = &self.parsed {
            // `infl_task()` only returns `None` when there is no inflected
            // word, which this branch just established there is.
            self.infl_task().unwrap().find_to(out);
            out.output_add.inflection = InflectionInformation::from_part(infl);
            return;
        }
        if let ParseResult::Sentence(mut sentence) = self.parsed.clone() {
            set_furigana(&mut sentence);
            self.snt_task_normalized().unwrap().find_to(out);
            let word = self.sentence_word().unwrap();
            // Run a second search for the surface form when it differs from
            // the normalized form.
            if word.get_inflected() != word.get_normalized() {
                self.snt_task_infl().unwrap().find_to(out);
            }
            out.output_add.inflection = InflectionInformation::from_part(word);
            out.output_add.raw_query = word.get_inflected();
            out.output_add.sentence = Some(SentenceInfo {
                parts: Some(sentence.clone()),
                index: self.query.word_index,
                query: word.get_normalized(),
            });
        }
    }

    fn should_run(&self, already_found: usize) -> bool {
        if self.parsed.is_none() || self.query.q_lang != QueryLang::Japanese || !self.query.form.is_normal() || self.query.query_str.is_empty() {
            return false;
        }
        // Always run inflections
        if self.parsed.is_inflected_word() {
            return true;
        }
        // Disable sentence reader if already found some words
        if already_found > 0 {
            return false;
        }
        let term_in_db = word_exists(&self.query.query_str);
        // For sentences only run if the query is not a term in the db
        !term_in_db
    }

    fn estimate_to(&self, out: &mut FilteredMaxCounter<::Item>) {
        if let Some(mut infl) = self.infl_task() {
            infl.estimate_to(out);
            return;
        }
        if self.parsed.is_sentence() {
            self.snt_task_normalized().unwrap().estimate_to(out);
            let word = self.sentence_word().unwrap();
            if word.get_inflected() != word.get_normalized() {
                self.snt_task_infl().unwrap().estimate_to(out);
            }
        }
    }
}

/// Returns `true` if the word exists in all words
fn word_exists(term: &str) -> bool {
    // NOTE(review): the `SearchTask` turbofish type was lost in extraction.
    let task = SearchTask::::new(term).with_limit(1);
    let query = term.to_string();
    // Only count hits whose word actually has `term` as a reading.
    let mut task = task.with_item_filter(move |i| {
        resources::get()
            .words()
            .by_sequence(*i.item())
            .unwrap()
            .has_reading(&query)
    });
    let res = task.find();
    res.len() > 0
}

/// Generates furigana for a sentence
fn set_furigana(s: &mut Sentence) {
    for part in s.iter_mut() {
        // Clone the part so the closure can read it while the original is
        // mutated through `set_furigana`.
        let p = part.clone();
        part.set_furigana(|inp| furigana_by_reading(inp, &p))
    }
}

/// Returns furigana of the given `morpheme` if available
fn furigana_by_reading(morpheme: &str, part:
&sentence_reader::Part) -> Option {
    // Prefer furigana from the word dictionary; fall back to name entries.
    word_furi(morpheme, part).or_else(|| name_furi(morpheme))
}

/// Looks `morpheme` up in the name index and, for a unique kanji name hit,
/// encodes its kanji/kana pair as a furigana segment.
fn name_furi(morpheme: &str) -> Option {
    let morpheme_c = morpheme.to_string();
    // NOTE(review): `SearchTask` turbofish type lost in extraction
    // (presumably `names::Engine`).
    let mut task = SearchTask::::new(morpheme)
        .with_limit(1)
        .with_result_filter(move |n| n.get_reading() == morpheme_c && n.has_kanji());
    let res = task.find();
    // Only trust an unambiguous match.
    if res.total_items != 1 {
        return None;
    }
    let name = res.get(0).unwrap().item;
    let kanji = name.kanji.as_ref().unwrap();
    Some(SegmentRef::new_kanji(&kanji, &[&name.kana]).encode())
}

/// Looks `morpheme` up in the word index, ranked by `WordFuriOrder`, and
/// returns the stored furigana of the best match (if any).
fn word_furi(morpheme: &str, part: &sentence_reader::Part) -> Option {
    let word_storage = resources::get().words();
    let pos = sentence_reader::part::wc_to_simple_pos(&part.word_class_raw());
    let morph = morpheme.to_string();
    let mut st = SearchTask::::new(morpheme)
        .with_limit(10)
        .with_custom_order(WordFuriOrder::new(pos, morpheme.to_string()))
        .with_result_filter(move |i| i.has_reading(&morph));
    st.find().get(0).and_then(|word| {
        word_storage
            .by_sequence(word.item.sequence)
            .and_then(|i| i.furigana.clone())
    })
}

/// Relevance engine ranking word candidates for furigana assignment.
struct WordFuriOrder {
    pos: Option,
    morph: String,
}

impl WordFuriOrder {
    #[inline]
    fn new(pos: Option, morph: String) -> Self {
        Self { pos, morph }
    }
}

impl RelevanceEngine for WordFuriOrder {
    type OutItem = &'static Word;
    type IndexItem = IndexItem;
    type Query = TermSet;

    /// Additive score: exact reading match (+100), kanji reading-frequency
    /// bonus for single-kanji readings, part-of-speech agreement (+20 or a
    /// 30-point penalty floored at 0), plus small common/JLPT bonuses.
    fn score<'item, 'query>( &self, item: &SortData<'item, 'query, Self::OutItem, Self::IndexItem, Self::Query>, ) -> f32 {
        let mut score = 0.0;
        let i = item.item();
        let reading = &i.get_reading().reading;
        let reading_len = utils::real_string_len(reading);
        if reading == &self.morph {
            score += 100.0;
        }
        if reading_len == 1 && reading.is_kanji() {
            // Weight single-kanji words by how frequent this kana reading is
            // for that kanji.
            let kanji = reading.chars().next().unwrap();
            let kana = i.get_kana();
            let norm = indexes::get()
                .kanji()
                .reading_freq()
                .norm_reading_freq(kanji, kana);
            if let Some(norm) = norm {
                score += norm * 10.0;
            }
        }
        if let Some(ref pos) = self.pos {
            if i.has_pos(&[*pos]) {
                score += 20.0;
            } else {
                //score = score.saturating_sub(30);
                score = (score - 30.0).max(0.0);
            }
        }
        if i.is_common() {
            score += 2.0;
        }
        if i.get_jlpt_lvl().is_some() {
            score += 2.0;
        }
        score
    }
}

================================================ FILE: lib/search/src/word/producer/japanese/task.rs ================================================

use engine::task::SearchTask;
use jp_utils::JapaneseExt;

use crate::{
    engine::words::native::Engine,
    query::Query,
    word::{filter::WordFilter, order::native::NativeOrder},
};

/// Helper for creating SearchTask for native (Japanese) queries
pub struct NativeSearch<'a> {
    query: &'a Query,
    query_str: &'a str,
    cust_original: Option<&'a str>,
    threshold: f32,
}

impl<'a> NativeSearch<'a> {
    #[inline]
    pub(crate) fn new(query: &'a Query, query_str: &'a str) -> Self {
        // Kanji queries are shorter so we need a lower threshold to not filter too many different words for short queries
        let kana_count: usize = query_str.chars().filter(|i| i.is_kana()).count();
        let kanji_count: usize = query_str.chars().filter(|i| i.is_kanji()).count();
        let kanji_query = kanji_count >= (kana_count * 2);
        let threshold = if kanji_query || (kanji_count + kana_count < 5) { 0.15 } else { 0.3 };
        Self { query, query_str, cust_original: None, threshold, }
    }

    /// Overrides the query string used as the "original" query for ordering.
    pub fn with_custom_original_query(mut self, query: &'a str) -> Self {
        self.cust_original = Some(query);
        self
    }

    /// Overrides the automatically chosen relevance threshold.
    pub fn with_threshold(mut self, threshold: f32) -> Self {
        self.threshold = threshold;
        self
    }

    /// Builds the search task: ordered by `NativeOrder`, filtered through
    /// `WordFilter`, using the threshold computed in `new`.
    pub fn task(&self) -> SearchTask<'static, Engine> {
        let filter = WordFilter::new(self.query.clone());
        let original_query = self.original_query().to_string();
        SearchTask::new(self.query_str)
            .with_custom_order(NativeOrder::new(original_query))
            .with_result_filter(move |item| !filter.filter_word(*item))
            .with_threshold(self.threshold)
    }

    /// The custom original query if set, otherwise the query's raw string.
    #[inline]
    pub fn original_query(&self) -> &str {
        self.cust_original
            .as_ref()
            .unwrap_or(&self.query.raw_query.as_str())
    }
}

================================================ FILE: lib/search/src/word/producer/k_reading.rs ================================================

use
engine::{
    pushable::FilteredMaxCounter,
    pushable::{PushMod, Pushable},
    relevance::item::RelItem,
    task::SearchTask,
};
use types::jotoba::{kanji::Kanji, words::Word};

use crate::{
    engine::words::native::k_reading,
    executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable},
    query::Query,
    word::{order::kanji_reading::KanjiReadingRelevance, Search},
};

/// Kanji reading search producer
pub struct KReadingProducer<'a> {
    query: &'a Query,
}

impl<'a> KReadingProducer<'a> {
    pub fn new(query: &'a Query) -> Self {
        Self { query }
    }

    /// Returns the kanji from the search. Returns `None` if kanji does not exist or doesn't
    /// match the reading from the search
    fn get_kanji(&self) -> Option<&'static Kanji> {
        let reading = self.query.form.as_kanji_reading()?;
        let kanji_storage = resources::get().kanji();
        let kanji = kanji_storage.by_literal(reading.literal)?;
        kanji.has_reading(&reading.reading).then(|| kanji)
    }

    /// Returns a query for the kanji reading index for the search query
    fn kr_query(&self) -> Option {
        let kanji = self.get_kanji()?;
        // `get_kanji` already proved the form is a kanji reading.
        let reading = self.query.form.as_kanji_reading().unwrap();
        Some(format!("{}{}", kanji.literal, reading.reading))
    }

    // NOTE(review): the generic parameter list of `find_to` (presumably `<P>`)
    // was lost in extraction; the `where` clause below still references `P`.
    fn find_to
    (&self, out: &mut P) where P: Pushable>, {
        let engine_query = match self.kr_query() {
            Some(q) => q,
            None => return,
        };
        // NOTE(review): `SearchTask` turbofish type lost in extraction
        // (presumably `k_reading::Engine`).
        SearchTask::::new(&engine_query)
            .with_custom_order(KanjiReadingRelevance)
            .find_to(out);
    }
}

impl<'a> Producer for KReadingProducer<'a> {
    type Target = Search<'a>;

    fn produce( &self, out: &mut OutputBuilder< ::Item, ::ResAdd, >, ) {
        self.find_to(out);
    }

    fn should_run(&self, _already_found: usize) -> bool {
        self.query.form.is_kanji_reading()
    }

    fn estimate_to(&self, out: &mut FilteredMaxCounter<::Item>) {
        // Strip the relevance wrapper so items can be counted directly.
        let mut m = PushMod::new(out, |i: RelItem<&Word>| i.item);
        // TODO: use estimate_to here
        self.find_to(&mut m);
    }
}

================================================ FILE: lib/search/src/word/producer/mod.rs ================================================

pub mod foreign;
pub mod japanese;
pub mod k_reading;
pub mod regex;
pub mod sequence;
pub mod tag;

================================================ FILE: lib/search/src/word/producer/regex.rs ================================================

use itertools::Itertools;
use types::jotoba::words::Word;

use crate::{
    engine::words::native::regex,
    executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable},
    query::{regex::RegexSQuery, Query},
    word::{order::regex::regex_order, Search},
};
use engine::{
    pushable::FilteredMaxCounter,
    pushable::{PushMod, Pushable},
    relevance::item::RelItem,
};

/// Producer for regex (wildcard) word queries.
pub struct RegexProducer<'a> {
    query: &'a Query,
}

impl<'a> RegexProducer<'a> {
    pub fn new(query: &'a Query) -> Self {
        Self { query }
    }

    /// Pushes all regex matches without computing an ordering (score 0).
    // NOTE(review): the generic bound was partially lost in extraction and
    // `®ex_query` below is mojibake for `&regex_query`; confirm against repo.
    fn find_to_unsorted>>( &self, out: &mut P, ) -> Option<()> {
        let regex_query = self.query.as_regex_query()?;
        search(®ex_query, |_, _| 0, out);
        Some(())
    }

    /// Pushes all regex matches ordered by `regex_order`.
    fn find_to>>(&self, out: &mut P) -> Option<()> {
        let regex_query = self.query.as_regex_query()?;
        search(®ex_query, |w, r| regex_order(w, r, ®ex_query), out);
        Some(())
    }
}

impl<'a> Producer for RegexProducer<'a> {
    type Target = Search<'a>;

    fn produce( &self, out: &mut OutputBuilder< ::Item, ::ResAdd, >, ) {
        self.find_to(out);
    }

    fn should_run(&self, _already_found: usize) -> bool {
        self.query.as_regex_query().is_some()
    }

    fn estimate_to(&self, out: &mut FilteredMaxCounter<::Item>) {
        let mut mid = PushMod::new(out, |i: RelItem<&'static Word>| i.item);
        // Ordering is irrelevant for counting, so use the unsorted variant.
        self.find_to_unsorted(&mut mid);
    }
}

/// Finds all words matching `query`'s regex, scores each matching reading via
/// `sort` and pushes the resulting items into `out`.
pub fn search<'a, F, P>(query: &'a RegexSQuery, sort: F, out: &mut P) where F: Fn(&'a Word, &'a str) -> usize, P: Pushable>, {
    let word_resources = resources::get().words();
    let index = indexes::get().word().regex();
    let possible_results = regex::find_words(index, &query.get_chars());
    // Sorted for deterministic output order over candidate sequence ids.
    for seq_id in possible_results.into_iter().sorted() {
        let word = word_resources.by_sequence(seq_id).unwrap();
        let item_iter = word
            .reading_iter(true)
            .filter_map(|i| query.matches(&i.reading).then(|| (word, &i.reading)))
            .map(|(word, reading)| {
                let order = sort(word, reading) as f32;
                RelItem::new(word, order)
            });
        for i in item_iter {
            out.push(i);
        }
    }
}

================================================ FILE: lib/search/src/word/producer/sequence.rs ================================================

use crate::{
    executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable},
    query::Query,
    word::Search,
};
use engine::{pushable::FilteredMaxCounter, pushable::Pushable, relevance::item::RelItem};
use types::jotoba::words::Word;

/// Producer for a Word by its sequence id
pub struct SeqProducer<'a> {
    query: &'a Query,
}

impl<'a> SeqProducer<'a> {
    pub fn new(query: &'a Query) -> Self {
        Self { query }
    }

    /// Looks the word up by the sequence id carried in the query form.
    pub fn word(&self) -> Option<&'static Word> {
        let seq = *self.query.form.as_sequence()?;
        resources::get().words().by_sequence(seq)
    }
}

impl<'a> Producer for SeqProducer<'a> {
    type Target = Search<'a>;

    fn produce( &self, out: &mut OutputBuilder< ::Item, ::ResAdd, >, ) {
        if let Some(word) = self.word() {
            // Single exact hit; relevance score is irrelevant here.
            out.push(RelItem::new(word, 0.0));
        }
    }

    fn should_run(&self, _already_found: usize) -> bool {
        self.query.form.is_sequence()
    }

    fn estimate_to(&self, out: &mut FilteredMaxCounter<::Item>) {
        if let Some(word) = self.word() {
            out.push(word);
        }
    }
}

================================================ FILE: lib/search/src/word/producer/tag.rs ================================================

use types::jotoba::words::Word;

use crate::{
    executor::{out_builder::OutputBuilder, producer::Producer, searchable::Searchable},
    query::{Query, Tag},
    word::Search,
};
use engine::{
    pushable::FilteredMaxCounter,
    pushable::{PushMod, Pushable},
    relevance::item::RelItem,
};

/// Producer for tag-only searches (no query text, e.g. `#n5`).
pub struct TagProducer<'a> {
    query: &'a Query,
}

impl<'a> TagProducer<'a> {
    pub fn new(query: &'a Query) -> Self {
        Self { query }
    }

    /// Picks the producer tag expected to yield the fewest items.
    fn get_producer_tag(&self) -> Option<&Tag> {
        self.query
            .tags
            .iter()
            .filter(|i| i.is_producer() && !i.is_sentence_tag())
            // Use tag with fewest items that it'll produce to reduce the amount of items that have to be filtered
            .map(|i| (self.tag_len(i).unwrap_or(usize::MAX), i))
            .min_by_key(|i| i.0)
            .map(|i| i.1)
    }

    // NOTE(review): generic parameter list (presumably `<P>`) lost in extraction.
    fn find_to

    (&self, out: &mut P) where P: Pushable>, {
        // Find first producer tag. All other tags are treated as filter
        // `should_run` guarantees a producer tag exists.
        let producer_tag = self.get_producer_tag().unwrap();
        self.find_words(out, producer_tag);
    }

    // NOTE(review): generic parameter list (presumably `<P>`) lost in extraction.
    fn find_words
    (&self, out: &mut P, tag: &Tag) where P: Pushable>, {
        let words = resources::get().words();
        // Dispatch to the matching pre-built word list; non-producer tags
        // silently produce nothing.
        match tag {
            Tag::PartOfSpeech(pos) => self.push_iter(words.by_pos_simple(*pos), out),
            Tag::Misc(m) => self.push_iter(words.by_misc(*m), out),
            Tag::Jlpt(jlpt) => self.push_iter(words.by_jlpt(*jlpt), out),
            Tag::Katakana => self.push_iter(words.katakana(), out),
            Tag::IrregularIruEru => self.push_iter(words.irregular_ichidan(), out),
            _ => (),
        }
    }

    /// Pushes up to 1000 words from the back of `iter`, scored in descending
    /// order (1000, 999, ...) so earlier-pushed items rank higher.
    fn push_iter(&self, iter: I, out: &mut P) where P: Pushable>, I: Iterator + DoubleEndedIterator, {
        let mut c = 0;
        for w in iter.rev() {
            let item = RelItem::new(w, (1000 - c) as f32);
            // Only count items the sink actually accepted.
            if out.push(item) {
                c += 1;
                if c >= 1000 {
                    break;
                }
            }
        }
    }

    /// Returns the amount of words a given tag has assigned/indexed
    #[inline]
    fn tag_len(&self, tag: &Tag) -> Option {
        let w_retr = resources::get().words();
        match tag {
            Tag::PartOfSpeech(p) => w_retr.pos_simple_len(p),
            Tag::Misc(m) => w_retr.misc_len(m),
            Tag::Jlpt(j) => w_retr.jlpt_len(*j),
            Tag::IrregularIruEru => Some(w_retr.irregular_ichidan_len()),
            Tag::Katakana => Some(w_retr.katakana_len()),
            _ => None,
        }
    }
}

impl<'a> Producer for TagProducer<'a> {
    type Target = Search<'a>;

    fn produce( &self, out: &mut OutputBuilder< ::Item, ::ResAdd, >, ) {
        self.find_to(out);
    }

    fn should_run(&self, _already_found: usize) -> bool {
        // Only run this producer if there is no query (except tags) and there are tags which can produce output
        self.query.query_str.is_empty() && self.get_producer_tag().is_some()
    }

    fn estimate_to(&self, out: &mut FilteredMaxCounter<::Item>) {
        let mut mid = PushMod::new(out, |i: RelItem<&Word>| i.item);
        self.find_to(&mut mid);
    }
}

================================================ FILE: lib/search/src/word/result.rs ================================================

use types::jotoba::words::inflection::Inflection;

use crate::executor::out_builder::OutputAddable;

/// Additional (non-item) data attached to a word search result.
#[derive(Default, Clone, Debug)]
pub struct AddResData {
    pub sentence: Option,
    pub inflection: Option,
    pub raw_query: String,
    pub number: Option,
}

impl OutputAddable for AddResData {
    // NOTE(review): `number` and `raw_query` are not considered here —
    // looks intentional (they piggyback on other results), but confirm.
    #[inline]
    fn is_empty(&self) -> bool {
        self.sentence.is_none() && self.inflection.is_none()
    }
}

/// Information about a parsed sentence attached to a search result.
#[derive(Default, Clone, Debug)]
pub struct SentenceInfo {
    pub parts: Option,
    pub index: usize,
    pub query: String,
}

#[derive(Debug, Clone, PartialEq)]
pub struct InflectionInformation {
    /// Normalized form of the word
    pub lexeme: String,
    /// All inflections
    pub inflections: Vec,
}

impl AddResData {
    pub fn has_sentence(&self) -> bool {
        self.sentence.is_some()
    }

    pub fn has_inflection(&self) -> bool {
        self.inflection.is_some()
    }

    pub fn sentence_parts(&self) -> Option<&sentence_reader::Sentence> {
        self.sentence.as_ref().and_then(|i| i.parts.as_ref())
    }

    pub fn sentence_index(&self) -> usize {
        self.sentence.as_ref().map(|i| i.index).unwrap_or(0)
    }
}

impl InflectionInformation {
    /// Builds inflection info from a sentence part; `None` if it isn't inflected.
    pub fn from_part(part: &sentence_reader::Part) -> Option {
        if !part.has_inflections() {
            return None;
        }
        Some(InflectionInformation {
            lexeme: part.get_normalized(),
            inflections: part.inflections().to_vec(),
        })
    }
}

/// Template helper: returns the literal "selected" when indices match.
pub fn selected(curr: usize, selected: usize) -> &'static str {
    if curr == selected { "selected" } else { "" }
}

================================================ FILE: lib/search/tests/search_test.rs ================================================

use jp_utils::JapaneseExt;
use search::{
    executor::search_result::SearchResult,
    query::{parser::QueryParser, Query, UserSettings},
    word::{kanji::load_word_kanji_info, result::AddResData},
    SearchExecutor,
};
use test_case::test_case;
use types::jotoba::{
    language::Language,
    search::SearchTarget,
    words::{inflection::Inflection, part_of_speech::PosSimple, Word},
};

// Runs a full word search for the given query.
fn search(query: &Query) -> SearchResult {
    let search = search::word::Search::new(query);
    SearchExecutor::new(search).run()
}

/// ----------- Inflections --------------- ///
#[test_case("知らなかった",&[Inflection::Past, Inflection::Negative])]
#[test_case("わかりたい",&[Inflection::Tai])]
#[test_case("わかりたくない",&[Inflection::Tai,
Inflection::Negative])] #[test_case("わかりたくなかった",&[Inflection::Tai, Inflection::Negative, Inflection::Past])] #[test_case("覚えてる",&[Inflection::TeIru])] #[test_case("覚えてない",&[Inflection::TeIru, Inflection::Negative])] #[test_case("覚えてなかった",&[Inflection::TeIru, Inflection::Negative, Inflection::Past])] #[test_case("書いておく",&[Inflection::TeOku])] fn inflections(query_str: &str, exp_infl: &[Inflection]) { wait(); let query = parse_query(query_str, Language::English, SearchTarget::Words); let res = search(&query); assert!(res.inflection.is_some()); let infl_info = res.inflection.as_ref().unwrap(); assert!(utils::same_elements(&infl_info.inflections, exp_infl)); } /// /// ----------- Sentence reader --------------- /// #[test_case("日本語勉強したい", &["日本語","勉強","したい"])] #[test_case("音楽が聞きたい", &["音楽","が","聞きたい"])] fn sentence_reader_test(query_str: &str, exp_parts: &[&str]) { wait(); // let query = parse_query(query_str, Language::English, SearchTarget::Words); let res = search(&query); let sentence = res.sentence.clone(); assert!(sentence.is_some()); let sentence = sentence.unwrap(); let mut exp_iter = exp_parts.iter(); for part in sentence.parts.unwrap().iter() { let exp = exp_iter.next().expect("Expected parts to short"); assert_eq!(&part.get_inflected(), exp); } } /// /// ----------- Kanji (right) --------------- /// // called in 'word_search' #[test_case("音楽")] #[test_case("買う")] #[test_case("宇宙")] #[test_case("宇宙人")] #[test_case("覚える")] fn correct_kanji_shown(query_str: &str) { wait(); let query = make_query(query_str, Language::English); let res = search(&query); let mut exp_kanji: Vec = Vec::new(); for word in &res.items { for kanji in word .get_reading() .reading .chars() .filter(|i| i.is_kanji() && !i.is_roman_letter()) { if !exp_kanji.contains(&kanji) { exp_kanji.push(kanji); } } } let kanji = load_word_kanji_info(&res.items); for (pos, kanji) in kanji.into_iter().enumerate() { assert_eq!(exp_kanji[pos], kanji.literal); } } /// ----------- Simple word search 
------------- /// #[test_case("musik", Language::German, "音楽")] #[test_case("音楽", Language::German, "音楽")] #[test_case("バラバラ", Language::German, "バラバラ")] #[test_case("ドイツ", Language::German, "ドイツ")] #[test_case("ドイツ人", Language::German, "ドイツ人")] #[test_case("to sleep", Language::English, "寝る")] #[test_case("買う", Language::English, "買う")] #[test_case("know", Language::German, "知る"; "Find in english too")] #[test_case("remember", Language::German, "覚える"; "Find in english too 2")] #[test_case("think", Language::German, "思う"; "Find in english too 3")] #[test_case("especially", Language::German, "特に"; "Find in english too 4")] // Regex #[test_case("宇宙*行士", Language::German, "宇宙飛行士"; "Regex 1")] #[test_case("宇*", Language::German, "宇宙"; "Regex 2")] #[test_case("宇宙*行士", Language::English, "宇宙飛行士"; "Regex 3")] #[test_case("宇*", Language::English, "宇宙"; "Regex 4")] fn word_search(query_str: &str, language: Language, first_res: &str) { wait(); let query = parse_query(query_str, language, SearchTarget::Words); let res = search(&query); let word = match res.items.get(0) { Some(n) => n, None => return, }; if !word.has_reading(first_res) { panic!("Expected {query_str:?} ({language}) to return {first_res:?} as first result (but was: {:?})", word.get_reading().reading); } } /// ------------- Part of speech filter ----------- /// #[test_case("音楽 #adjective", &[PosSimple::Adjective], &["音楽的", "標題音楽", "電子音楽"]; "Test single tag")] #[test_case("speak #verb", &[PosSimple::Verb], &["話す","話せる"]; "Test foreign inp")] #[test_case("speak #noun", &[PosSimple::Noun], &["言葉"]; "Test unlikely")] fn pos_tag_test(query_str: &str, exp_pos: &[PosSimple], exp_res: &[&str]) { wait(); let query = parse_query(query_str, Language::English, SearchTarget::Words); let res = search(&query); let have_tag = res .items .iter() .all(|i| exp_pos.iter().all(|j| i.has_pos(&[*j]))); assert!(have_tag); assert!(exp_res .iter() .all(|j| res.items.iter().any(|w| w.has_reading(j)))); } /// ----------- JP search Relevance 
----------- /// #[test] fn test_jp_search() { wait(); // Expect most important word on top for word in resources::get().words().iter().step_by(317) { let reading = &word.get_reading().reading; word_search(reading, Language::Swedish, reading); } } // ------------ Romaji search ---------------- /// #[test_case("kore",&["これ"])] #[test_case("tokasu", &["溶かす"])] #[test_case("kisuu", &["奇数"])] #[test_case("daijoubu", &["大丈夫"])] #[test_case("jikan", &["時間"])] #[test_case("kono", &["この"])] #[test_case("kanjiru", &["感じる"])] #[test_case("ongaku", &["音楽"])] #[test_case("kimi", &["君"])] #[test_case("jitensha", &["自転車"])] #[test_case("kiku", &["聞く"])] #[test_case("suki", &["好き"])] fn test_romaji(query_str: &str, expected: &[&str]) { wait(); let res = search(&make_query(query_str, Language::English)); for exp in expected.iter() { if !res.iter().take(3).any(|i| i.has_reading(exp)) { panic!("Expected {:?} to find {exp:?} (Romaji search)", query_str); } } } fn make_query(query_str: &str, language: Language) -> Query { Query { query_str: query_str.to_string(), settings: UserSettings { user_lang: language, ..UserSettings::default() }, ..Query::default() } } fn parse_query(query_str: &str, language: Language, q_type: SearchTarget) -> Query { let mut settings = UserSettings::default(); settings.user_lang = language; QueryParser::new(query_str.to_string(), q_type, settings) .parse() .expect("Invaild query passed") } fn load_data() { if resources::is_loaded() || indexes::storage::is_loaded() { return; } rayon::scope(|s| { s.spawn(|_| { resources::load("../../resources/storage_data").unwrap(); }); s.spawn(|_| { indexes::storage::load("../../resources/indexes").unwrap(); }); s.spawn(|_| { sentence_reader::load_parser("../../resources/unidic-mecab"); }) }); } fn wait() { if !resources::is_loaded() && !indexes::storage::is_loaded() && !sentence_reader::is_loaded() { load_data(); return; } indexes::storage::wait(); resources::wait(); sentence_reader::wait(); } 
================================================ FILE: lib/sentence_reader/Cargo.toml ================================================ [package] name = "sentence_reader" version = "0.1.0" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] igo-unidic = { git = "https://github.com/JojiiOfficial/igo-unidic" } once_cell = { version = "1.18.0", default-features = false } localization = { path = "../localization", optional = true } japanese = { path = "../japanese" } types = { path = "../types" } jp_utils = { git = "https://github.com/JojiiOfficial/jp_utils"} [features] default = [] # This feature adds stuff required for Jotoba to work but not necessarily for extern crates, so its made optional jotoba_intern = ["localization"] ================================================ FILE: lib/sentence_reader/src/analyzer.rs ================================================ use crate::grammar::{rule::Rule, rule_set::RuleSet, Analyzer}; use once_cell::sync::Lazy; static RULES: Lazy = Lazy::new(|| Analyzer::new(get_rules())); /// Returns a grammar analyzer with a japanese inflection ruleset pub(crate) fn get_grammar_analyzer() -> &'static Analyzer { &RULES } /// Returns a set of rules for japanese text analyzing fn get_rules() -> RuleSet { // Often used dest rules let end = &[]; let te_ending = &[ "て", "てる", "ます", "しまう", "ない", "た", "てみる", "いる", "ある", "おく", ]; let ru_ending = &[ "て", "てる", "ます", "しまう", "ない", "た", "たり", "ちゃう", "とく", "たい", "られる", "れる", "ば", ]; // \ Often used dest rules // let mut rules = Vec::with_capacity(20); // い rule rules.push(Rule::new("た", end)); rules.push(Rule::new("たり", end)); rules.push(Rule::new("ない", &["て", "た"])); rules.push(Rule::new("たい", &["て", "ない", "た"])); // じゃない rules.push(Rule::new("じゃ", &["ない"])); // て rules.push(Rule::new("て", te_ending)); rules.push(Rule::new("てみる", ru_ending)); rules.push(Rule::new("しまう", ru_ending)); rules.push(Rule::new("おく", ru_ending)); 
rules.push(Rule::new("てる", ru_ending)); // いる/ある rules.push(Rule::new("いる", ru_ending)); rules.push(Rule::new("ある", ru_ending)); // Masu rules.push(Rule::new("ます", &["た", "ん"])); rules.push(Rule::new("ん", &["です"])); rules.push(Rule::new("です", &["た"])); // passive / 可能形 rules.push(Rule::new("られる", ru_ending)); rules.push(Rule::new("れる", ru_ending)); // ちゃう / しまう rules.push(Rule::new("ちゃう", ru_ending)); rules.push(Rule::new("しまう", ru_ending)); // とく rules.push(Rule::new("とく", ru_ending)); // ば conditional rules.push(Rule::new("ば", end)); // される causative rules.push(Rule::new("さ", &["せる", "れる"])); rules.push(Rule::new("せる", ru_ending)); rules.push(Rule::new("させる", ru_ending)); // Exceptions rules.push(Rule::new("いただき", &["ます"])); // ぬ //rules.push(Rule::new("V", &["ます"])); rules.push(Rule::new("ん", end)); // だった rules.push(Rule::new("た", &["た"])); // Generation/Root rules.push(Rule::new( "V", &[ "た", "たり", "ない", "たい", "て", "てる", "てみる", "いる", "ある", "ます", "られる", "れる", "ちゃう", "しまう", "とく", "ば", "せる", "させる", // the さ of される "さ", // ぬ "ん", ], )); rules.push(Rule::new("AD", &["ない", "た", "て"])); rules.push(Rule::new("NR", &["NR"])); // generate ruleset RuleSet::new(&rules) } ================================================ FILE: lib/sentence_reader/src/grammar/mod.rs ================================================ #![allow(dead_code)] use self::{ rule::{Rule, ToRule}, rule_set::RuleSet, }; pub mod rule; pub mod rule_set; /// A Grammar analyzer #[derive(Clone)] pub struct Analyzer { rules: RuleSet, } impl Analyzer { /// Creates a new Grammar analyzer pub fn new(rules: RuleSet) -> Self { Self { rules } } /// Checks if `inp` can be built with the given ruleset. Returns the index of the last rule /// that was matching. 
    /// In other words if the return value is equal to `inp.len()`, all input
    /// rules were matching
    // NOTE(review): the generic parameter list (presumably `<T: ToRule>`) was
    // lost in extraction; the signature still uses `T`.
    pub fn check(&self, inp: &[T]) -> usize {
        if inp.is_empty() {
            return 0;
        }
        let mut pos = 0;
        // The first element only needs to resolve to a known rule.
        let mut last_rule = match self.resolve_to_rule(&inp[0]) {
            Some(r) => r,
            None => return pos,
        };
        pos += 1;
        // Each following rule must be a valid destination of its predecessor.
        for part in &inp[pos..] {
            let rule = match self.resolve_to_rule(part) {
                Some(r) => r,
                None => return pos,
            };
            if !last_rule.has_dst(rule.name()) {
                return pos;
            }
            last_rule = rule;
            pos += 1;
        }
        pos
    }

    /// Returns `true` if the analyzer has a given rule
    #[inline]
    pub fn has_rule(&self, rule: &str) -> bool {
        self.rules.get_rule(rule).is_some()
    }

    /// Checks if a series of Rules can be built with the current set of Rules
    #[inline]
    pub fn check_full(&self, inp: &[T]) -> bool {
        self.check(inp) == inp.len()
    }

    /// resolves a rule from `ToRule` to `&Rule`
    #[inline]
    fn resolve_to_rule(&self, tr: T) -> Option<&Rule> {
        tr.to_rule().and_then(|i| self.rules.get_rule(i))
    }

    /// Get a reference to the analyzer's rules.
    #[inline]
    pub fn rules(&self) -> &RuleSet {
        &self.rules
    }
}

================================================ FILE: lib/sentence_reader/src/grammar/rule.rs ================================================

use super::rule_set::ALL_WILDCARD;

/// Represents a single rule describing a possible production
/// of a grammar
#[derive(Clone, Copy)]
pub struct Rule {
    name: &'static str,
    rhs: &'static [&'static str],
}

impl Rule {
    /// Creates a new rule
    pub fn new(name: &'static str, rhs: &'static [&'static str]) -> Self {
        Self { name, rhs }
    }

    /// Get the rule's name.
    #[inline]
    pub fn name(&self) -> &'static str {
        self.name
    }

    /// Get the rule's destination rules
    #[inline]
    pub fn rhs(&self) -> &'static [&'static str] {
        self.rhs
    }

    /// Returns `true` if the rule has a dst rule with `name`
    #[inline]
    pub fn has_dst(&self, name: &str) -> bool {
        self.rhs.iter().any(|i| *i == name || *i == ALL_WILDCARD)
    }
}

/// Anything that can name the grammar rule it belongs to.
pub trait ToRule {
    fn to_rule(&self) -> Option<&str>;
}

impl ToRule for &'static str {
    #[inline]
    fn to_rule(&self) -> Option<&str> {
        Some(self)
    }
}

// NOTE(review): the blanket impl's generic bound (presumably `<T: ToRule>`)
// was lost in extraction.
impl ToRule for &T {
    #[inline]
    fn to_rule(&self) -> Option<&str> {
        (*self).to_rule()
    }
}

================================================ FILE: lib/sentence_reader/src/grammar/rule_set.rs ================================================

use super::rule::Rule;
use std::{collections::HashMap, fmt::Debug};

// Destination wildcard: a rule listing this may transition to any rule.
pub const ALL_WILDCARD: &str = "*";

#[derive(Clone)]
pub struct RuleSet {
    rules: HashMap<&'static str, Rule>,
}

impl RuleSet {
    /// Creates a new set of rules
    pub fn new(rules: &[Rule]) -> Self {
        let rules = rules
            .iter()
            .map(|i| (i.name(), *i))
            .collect::>();
        Self { rules }
    }

    /// Adds a Rule to the RuleSet
    // Returns `false` (and changes nothing) if a rule with that name exists.
    pub fn add(&mut self, rule: Rule) -> bool {
        if self.has_rule(rule.name()) {
            return false;
        }
        // add dummy rule to allow any dst rule
        if rule.has_dst(ALL_WILDCARD) {
            self.add_all_wildcard();
        }
        self.rules.insert(rule.name(), rule);
        true
    }

    /// Returns `true` if ruleSet has a rule with `name`
    pub fn has_rule(&self, name: &str) -> bool {
        self.rules.contains_key(name)
    }

    /// Returns `true` if the RuleSet is complete
    pub fn check(&self) -> bool {
        // check that all used dst rules are reachable
        for (_, rule) in self.rules.iter() {
            for rhs in rule.rhs() {
                if *rhs == ALL_WILDCARD {
                    continue;
                }
                if !self.rules.contains_key(rhs) {
                    return false;
                }
            }
        }
        true
    }

    /// Returns a rule with `name` or None when no such rule exists in RuleSet
    #[inline]
    pub fn get_rule(&self, name: &str) -> Option<&Rule> {
        self.rules.get(name)
    }

    fn add_all_wildcard(&mut self) {
        if self.has_rule(ALL_WILDCARD) {
            return;
        }
        // add dummy rule that allows any production
        self.rules
            .insert(ALL_WILDCARD, Rule::new(ALL_WILDCARD, &[]));
    }
}

impl Debug for RuleSet {
    // Prints each rule as `name -> dst1 | dst2`; rules without destinations
    // are skipped. Iteration order follows the HashMap and is unspecified.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        for (name, rule) in &self.rules {
            let mut dst = String::new();
            for (pos, d) in rule.rhs().iter().enumerate() {
                if pos > 0 {
                    dst.push_str(" | ");
                }
                dst.push_str(*d);
            }
            if dst.is_empty() {
                continue;
            }
            write!(f, "{name} -> {dst}\n")?;
        }
        Ok(())
    }
}

================================================ FILE: lib/sentence_reader/src/lib.rs ================================================

mod analyzer;
mod grammar;
pub mod output;
mod sentence;

use std::path::Path;

use once_cell::sync::{Lazy, OnceCell};
use output::ParseResult;
use sentence::SentenceAnalyzer;

pub use igo_unidic;
pub use output::Sentence;
pub use sentence::part::{self, Part};

// Globally shared morphological parser; set once via `load_parser`.
// NOTE(review): the `Lazy`/`OnceCell` type parameters were lost in extraction.
pub static JA_NL_PARSER: Lazy> = Lazy::new(|| OnceCell::new());

/// Loads the igo-unidic parser from `path`; later calls are no-ops.
// NOTE(review): generic bound (presumably `<P: AsRef<Path>>`) lost in extraction.
pub fn load_parser>(path: P) {
    let parser = igo_unidic::Parser::new(path.as_ref().to_str().unwrap()).unwrap();
    JA_NL_PARSER.set(parser).ok();
}

/// Blocks until the parser has been loaded.
pub fn wait() {
    JA_NL_PARSER.wait();
}

pub fn is_loaded() -> bool {
    JA_NL_PARSER.get().is_some()
}

/// Parser for sentence
pub struct Parser<'input> {
    sentence_analyzer: SentenceAnalyzer<'input>,
}

impl<'input> Parser<'input> {
    /// Creates a new InputTextParser
    pub fn new(original: &'input str) -> Self {
        let sentence_analyzer = SentenceAnalyzer::new(
            analyzer::get_grammar_analyzer(),
            JA_NL_PARSER.get().unwrap().parse(original),
        );
        Self { sentence_analyzer }
    }

    /// Execute the parsing
    // A single analyzed part becomes `InflectedWord` (if it has inflections),
    // several parts become a `Sentence`, nothing parseable yields `None`.
    pub fn parse(&self) -> ParseResult {
        let mut sent_parse = self.sentence_analyzer.analyze::();
        if sent_parse.is_empty() {
            return ParseResult::None;
        } else if sent_parse.len() == 1 {
            let parsed = sent_parse.remove(0);
            return parsed
                .has_inflections()
                .then(|| ParseResult::InflectedWord(parsed))
                .unwrap_or(ParseResult::None);
        }
        ParseResult::Sentence(Sentence::new(sent_parse))
    }
}

================================================ FILE: 
lib/sentence_reader/src/output.rs
================================================
use crate::sentence::part::Part;

/// Result of a sentence/inflection analysis
#[derive(Debug, Clone)]
pub enum ParseResult {
    /// The input split into multiple parts (a full sentence).
    Sentence(Sentence),
    /// The input was a single word carrying at least one inflection.
    InflectedWord(Part),
    /// Nothing could be parsed.
    None,
}

impl ParseResult {
    /// Returns `true` if the parse result is [`Sentence`].
    ///
    /// [`Sentence`]: ParseResult::Sentence
    #[inline]
    pub fn is_sentence(&self) -> bool {
        matches!(self, Self::Sentence(..))
    }

    /// Returns `true` if the parse result is [`InflectedWord`].
    ///
    /// [`InflectedWord`]: ParseResult::InflectedWord
    #[inline]
    pub fn is_inflected_word(&self) -> bool {
        matches!(self, Self::InflectedWord(..))
    }

    /// Returns `true` if the parse result is [`None`].
    ///
    /// [`None`]: ParseResult::None
    #[inline]
    pub fn is_none(&self) -> bool {
        matches!(self, Self::None)
    }

    /// Returns the inner [`Sentence`] if the result is a sentence.
    #[inline]
    pub fn as_sentence(&self) -> Option<&Sentence> {
        if let Self::Sentence(v) = self {
            Some(v)
        } else {
            None
        }
    }

    /// Returns the inner [`Part`] if the result is a single inflected word.
    #[inline]
    pub fn as_inflected_word(&self) -> Option<&Part> {
        if let Self::InflectedWord(v) = self {
            Some(v)
        } else {
            None
        }
    }
}

/// A split sentence
#[derive(Debug, Clone, PartialEq)]
pub struct Sentence {
    // NOTE(review): the element type was lost in extraction; `Vec<Part>`
    // restored from the accessor signatures below (`Option<&Part>` etc.).
    parts: Vec<Part>,
}

impl Sentence {
    #[inline]
    pub fn new(parts: Vec<Part>) -> Self {
        Self { parts }
    }

    /// Returns word at `pos`
    #[inline]
    pub fn get_at(&self, pos: usize) -> Option<&Part> {
        self.parts.get(pos)
    }

    /// Returns word at `pos` mutably
    #[inline]
    pub fn get_at_mut(&mut self, pos: usize) -> Option<&mut Part> {
        self.parts.get_mut(pos)
    }

    /// Iterates over all parts mutably.
    #[inline]
    pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut Part> {
        self.parts.iter_mut()
    }

    /// Iterates over all parts.
    #[inline]
    pub fn iter(&self) -> impl Iterator<Item = &Part> {
        self.parts.iter()
    }

    /// returns amount of words
    #[inline]
    pub fn word_count(&self) -> usize {
        self.parts.len()
    }

    /// Returns all parts owned
    #[inline]
    pub fn into_parts(self) -> Vec<Part> {
        self.parts
    }
}

================================================
FILE: lib/sentence_reader/src/sentence/inflection.rs
================================================
use super::FromMorphemes; use
crate::grammar::{rule::Rule, rule_set::RuleSet, Analyzer}; use crate::sentence::SentenceAnalyzer; use igo_unidic::Morpheme; use once_cell::sync::Lazy; use types::jotoba::words::inflection::Inflection; /* #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] pub enum Inflection { Negative, Polite, Present, Past, TeForm, Potential, Passive, Causative, CausativePassive, PotentialOrPassive, Imperative, Tai, TeIru, TeAru, TeMiru, TeShimau, Chau, TeOku, Toku, Tara, Tari, } */ impl<'b> FromMorphemes<'static, 'b> for Inflection { /// Parses an inflection from given morpheme(s) fn from(parts: Vec>, _pos: usize) -> Option { let lexemes = parts.iter().map(|i| i.lexeme).collect::>(); if lexemes.is_empty() { None } else if lexemes.len() == 1 { if parts[0].surface == "たら" { return Some(Self::Tara); } Some(match lexemes[0] { "ない" | "ぬ" => Inflection::Negative, "ます" => Inflection::Polite, "て" | "で" => Inflection::TeForm, "だ" | "た" => Inflection::Past, "れる" => Inflection::Passive, "せる" | "させる" => Inflection::Causative, "られる" => Inflection::PotentialOrPassive, "たい" => Inflection::Tai, "たり" | "だり" => Inflection::Tari, "てる" | "でる" => Inflection::TeIru, "とく" | "どく" => Inflection::Toku, "ちゃう" | "じゃう" => Inflection::Chau, "ば" => Inflection::Ba, _ => return None, }) } else { Some(match lexemes.as_slice() { &["て", "いる"] | &["で", "いる"] => Inflection::TeIru, &["て", "ある"] | &["で", "ある"] => Inflection::TeAru, &["て", "みる"] | &["で", "みる"] => Inflection::TeMiru, &["て", "しまう"] | &["で", "しまう"] => Inflection::TeShimau, &["て", "おく"] | &["で", "おく"] => Inflection::TeOku, &["さ", "せる"] => Inflection::Causative, // Fake する; The tokenizer tokenizes the さ of される as a form of する &["する", "れる"] => Inflection::CausativePassive, _ => return None, }) } } } pub(crate) fn parse_inflections(morph: &[Morpheme<'static, '_>]) -> Vec { SentenceAnalyzer::new(&INFLECTION_RULES, morph.to_vec()).analyze::() } static INFLECTION_RULES: Lazy = Lazy::new(|| Analyzer::new(get_rules())); /// Returns a set of rules for 
japanese text analyzing fn get_rules() -> RuleSet { let mut rules = Vec::with_capacity(7); rules.push(Rule::new("いる", &[])); rules.push(Rule::new("ある", &[])); rules.push(Rule::new("てみる", &[])); rules.push(Rule::new("しまう", &[])); rules.push(Rule::new("おく", &[])); rules.push(Rule::new("れる", &[])); rules.push(Rule::new( "て", &["いる", "ある", "てみる", "しまう", "おく"], )); rules.push(Rule::new("さ", &["れる"])); RuleSet::new(&rules) } ================================================ FILE: lib/sentence_reader/src/sentence/mod.rs ================================================ #![allow(dead_code)] pub mod inflection; pub mod owned_morpheme; pub mod part; use crate::grammar; use igo_unidic::{Morpheme, WordClass}; pub trait FromMorphemes<'a, 'b>: Sized { fn from(parts: Vec>, pos: usize) -> Option; } impl<'b> FromMorphemes<'static, 'b> for (Vec<&'static str>, usize) { #[inline] fn from(parts: Vec>, pos: usize) -> Option { let parts = parts.iter().map(|i| i.lexeme).collect::>(); if parts.is_empty() { return None; } Some((parts, pos)) } } /// An analyzer for sentences/text to portion morphemes together based on rules pub struct SentenceAnalyzer<'input> { grammar: &'input grammar::Analyzer, morphemes: Vec>, } impl<'input> SentenceAnalyzer<'input> { /// Create a new SentenceAnalyer pub fn new( grammar: &'input grammar::Analyzer, morphemes: Vec>, ) -> Self { Self { grammar, morphemes } } /// Returns `true` if SentenceAnalyer would yield no words pub fn is_empty(&self) -> bool { self.morphemes.is_empty() } /// Executes the analyzation and returns a set of Words which are built out of 1..n morphemes pub fn analyze>(&self) -> Vec { let morphs = &self.morphemes; let mut out = Vec::new(); let mut pos = 0; loop { let curr = match morphs.get(pos) { Some(n) => n, None => break, }; // Collect rules of next n morphemes let rules: Vec<_> = morphs[pos..] 
.iter() .enumerate() .map(|(pos, m)| map_morph_to_rule(pos, m)) // if a morphemes does not have a rule, we can stop // collecting all rules since the analyzer would stop // at a `None` rule anyways .take_while(|i| i.is_some()) .map(|i| i.unwrap()) .collect(); let n_matching = self.grammar.check(&rules); let mut parts = (0..n_matching).map(|i| morphs[pos + i]).collect::>(); if parts.is_empty() { parts.push(*curr); pos += 1; } pos += n_matching; let word_position = out.len(); if let Some(word) = O::from(parts, word_position) { out.push(word); } } out } /// Returns the raw morphemes of the sentence pub fn morphemes(&self) -> &Vec> { &self.morphemes } pub fn debug(&self) { for i in self.morphemes.iter() { println!("{}\t({})({:?})", i.surface, i.lexeme, i.word_class); } println!(); for i in self.analyze::() { print!( "{}|", i.morphemes() .iter() .map(|i| i.surface.as_str()) .collect::() ); } println!(); } } pub(crate) fn map_morph_to_rule(pos: usize, morph: &Morpheme<'_, '_>) -> Option<&'static str> { if morph.surface == "じゃ" { return Some("じゃ"); } if morph.lexeme == "ない" { return Some("ない"); } if morph.lexeme == "たい" { return Some("たい"); } if (morph.lexeme == "た" || morph.lexeme == "だ") && morph.surface != "に" { return Some("た"); } if morph.lexeme == "たり" || morph.lexeme == "だり" { return Some("たり"); } if morph.lexeme == "てる" || morph.lexeme == "でる" { return Some("てる"); } if morph.lexeme == "て" || morph.lexeme == "で" { return Some("て"); } if morph.lexeme == "ある" { return Some("ある"); } if morph.lexeme == "いる" { return Some("いる"); } if morph.lexeme == "ます" { return Some("ます"); } if morph.lexeme == "られる" { return Some("られる"); } if morph.lexeme == "れる" { return Some("れる"); } if morph.lexeme == "しまう" { return Some("しまう"); } if morph.lexeme == "ちゃう" || morph.lexeme == "じゃう" { return Some("ちゃう"); } if morph.lexeme == "おく" { return Some("おく"); } if morph.lexeme == "とく" || morph.lexeme == "どく" { return Some("とく"); } if morph.lexeme == "ば" { return Some("ば"); } if morph.lexeme == 
"ぬ" { return Some("ん"); } if morph.lexeme == "です" { return Some("です"); } if morph.surface == "さ" && morph.lexeme == "する" { return Some("さ"); } if morph.lexeme == "させる" { return Some("させる"); } if morph.lexeme == "せる" { return Some("せる"); } if morph.lexeme == "頂" && morph.surface == "頂" && morph.reading == "イタダキ" { return Some("いただき"); } // てみる form. Can only be applied if not pos==0. If pos == 0, the word 見る is being used // which does not go with the みる rule if (morph.surface == "み" || morph.lexeme == "みる") && pos > 0 { return Some("てみる"); } if let WordClass::Noun(noun_type) = morph.word_class { return Some(match noun_type { igo_unidic::NounType::Numeral => "NR", _ => "N", }); } if morph.word_class.is_adjective() { return Some("AD"); } if morph.word_class.is_verb() { return Some("V"); } None } /* * TODO: fix Parser not being static #[cfg(test)] mod test { use crate::grammar::Analyzer; use igo_unidic::Parser; use super::*; fn get_parser() -> Parser { igo_unidic::Parser::new("../../unidic-mecab").unwrap() } fn get_g_analyzer() -> &'static Analyzer { crate::analyzer::get_grammar_analyzer() } #[test] pub fn test_analyzer() { let analyzer = get_g_analyzer(); assert!(analyzer.rules().check()); assert_eq!(analyzer.check(&["ない", "て"]), 2); assert_eq!(analyzer.check(&["ない", "abc"]), 1); assert_eq!(analyzer.check(&["い", "い"]), 0); assert_eq!(analyzer.check(&["V", "たい", "ない"]), 3); } #[test] pub fn test_single_words() { let words = &[ "見たくない", "見る", "見ます", "見たい", "見たくない", "見たくなくて", "見たくなかった", "見て", "見ている", "見ています", "見てある", "見てあります", "見ない", "見なくて", "見なかった", "見ません", "見ませんでした", "見られる", "見られて", "見られている", "見られない", "見られなくて", "見られなかった", "見ちゃう", "見てしまう", "持っていない", "美味しい", "美味しかった", "美味しくない", "美味しくなくて", "美味しくなかった", "美味しくて", "便利", "じゃない", "じゃなかった", "じゃなくて", "いちゃう", "いてしまう", "行ってしまう", "行っちゃう", ]; let analyzer = get_g_analyzer(); let parser = get_parser(); for word in words { let sentenec_parser = SentenceAnalyer::new(&analyzer, parser.parse(word)); let analyzed = 
sentenec_parser.analyze(); if analyzed.len() != 1 { println!("{word}"); panic!("Word split to much"); } if analyzed[0].get_inflected() != *word { println!("{word} != {}", analyzed[0].get_inflected()); panic!("word is not equal to surface"); } } } #[test] pub fn test_long_texts() { let analyzer = get_g_analyzer(); let parser = get_parser(); let inp = &[ "18日午後0時55分頃、札幌市中央区の22階建てホテルの14階にある屋外スペースで、女優の神田沙也加さん(35)が意識不明の状態で倒れているのが見つかり、約9時間後に搬送先の病院で死亡した", "北海道警は、宿泊していた高層階の部屋の窓から転落した可能性があるとみている。ホテル関係者によると、窓は縦、横とも約1メートル。全開できないよう安全装置が取り付けられていたという", "神田沙也加さん死亡、ホテル高層階の部屋から転落か…連絡つかず事務所が警察に通報", "昨今「ウケる」は、面白いという意味で頻繁に使用されています。如何なる面白さにも用いることができ、「この芸人さん超ウケるよね」とか「この遊びウケる」、「この蛇の動き超ウケる」というように使われます。", "しかし、「ウケる」の定義の幅が多いため、「君の顔ウケるよね~」なんて言うと、言った本人に悪気がなくても、言われた側は気に障ってしまうかもしれません。「ウケる」という言葉は便利ですが、時と場所、相手を選んで使うようにしましょう。", "「ウケる」という単語には「超ウケる」というような表現も存在します。これは「超面白い」と同様に「ウケる」に「超」が付いただけのものでありますが、「ウケる」の意味を強調して、本当に面白いさまを表します。", "また「大ウケ」という言葉もあります。こちらは主観的に用いられがちな「ウケる」と違い、客観的な評価を表してしばしば使用されます。たとえば「二次会で披露したギャグが大ウケだった」という場合、自身の芸に観客が大盛り上がりしたという意味になります。", ]; for i in inp { let sentenec_parser = SentenceAnalyer::new(&analyzer, parser.parse(i)); let analyzed = sentenec_parser.analyze(); let mut out = String::new(); for a in analyzed { out.push_str(&a.get_inflected()); } assert_eq!(*i, out); } } } */ ================================================ FILE: lib/sentence_reader/src/sentence/owned_morpheme.rs ================================================ use igo_unidic::{Conjungation, Morpheme, WordClass}; #[derive(Clone, Debug, PartialEq)] pub struct OwnedMorpheme<'dict> { pub surface: String, pub basic: &'dict str, pub word_class: WordClass<'dict>, pub conjungation: Conjungation, pub reading: &'dict str, pub lexeme: &'dict str, pub start: usize, } impl<'dict> From> for OwnedMorpheme<'dict> { #[inline] fn from(m: Morpheme<'dict, '_>) -> Self { Self { surface: m.surface.to_string(), basic: m.basic, word_class: m.word_class, conjungation: m.conjungation, reading: m.reading, lexeme: m.lexeme, start: m.start, 
} } } impl<'dict> OwnedMorpheme<'dict> { /// Gets the main lexeme. Falls back on surface if lexeme is empty pub fn reading(&self) -> &str { if !self.lexeme.is_empty() { self.lexeme } else { &self.surface } } } ================================================ FILE: lib/sentence_reader/src/sentence/part.rs ================================================ use super::{inflection, owned_morpheme::OwnedMorpheme, FromMorphemes}; use igo_unidic::{Morpheme, WordClass}; use jp_utils::{ furi::{ segment::{kanji::as_kanji::AsKanjiSegment, AsSegment}, Furigana, }, JapaneseExt, }; use types::{ api::app::search::responses::words::SentencePart, jotoba::words::{inflection::Inflection, part_of_speech::PosSimple}, }; /// A single word within a sentence. This already contains all inflection parts #[derive(Debug, Clone, PartialEq)] pub struct Part { /// All morphemes building the (inflected) word morphemes: Vec>, inflections: Vec, pos: usize, furigana: Option, } impl Part { /// Creates a new sentence part. Automatically parses additional morphemes to inflections pub fn new(morphemes: Vec>, pos: usize) -> Option { if morphemes.len() == 0 { return None; } // parse inflections let inflections = inflection::parse_inflections(&morphemes[1..]); // get them owned let morphemes = morphemes.into_iter().map(|i| i.into()).collect::>(); Some(Self { furigana: None, inflections, pos, morphemes, }) } /// Returns `true` if the part has at least one inflection pub fn has_inflections(&self) -> bool { !self.inflections().is_empty() } /// Get a reference to the parts morphemes. pub fn morphemes(&self) -> &[OwnedMorpheme] { &self.morphemes } /// Get a reference to the word's inflections. pub fn inflections(&self) -> &[Inflection] { &self.inflections } /// Returns the full surface of the part. If it has inflections, this surface represents the /// word written with all inflections. 
If there are no inflections, this method returns the /// same as `get_normalized()` pub fn get_inflected(&self) -> String { self.morphemes .iter() .map(|i| i.surface.as_str()) .collect::() } /// Returns the normalized form of the word. All inflections are removed and the dictionary /// form of the word is returned pub fn get_normalized(&self) -> String { self.get_main_morpheme().lexeme.to_string() } /// Get the part's pos. pub fn pos(&self) -> usize { self.pos } /// Sets the furigana pub fn set_furigana(&mut self, add_fn: F) where F: Fn(&str) -> Option, { let mut out = String::new(); let mut has_furigana = false; for morpheme in &self.morphemes { if !morpheme.surface.has_kanji() { out.push_str(&morpheme.surface); continue; } if let Some(furi) = add_fn(morpheme.reading()) { let surface = &morpheme.surface; // check if `furi` really contains furigana. If this is not the case but // `has_furigana` is true, the text will be rendered weird if !furi.contains('|') || !can_merge_furi(surface, &furi) { out.push_str(&furi); } else if let Some(furi) = merge_furigana(surface, &furi) { has_furigana = true; out.push_str(&furi); } continue; } out.push_str(&morpheme.surface); } if has_furigana { self.furigana = Some(out); } } /// Returns furigana of the word pub fn furigana(&self) -> Option<&str> { self.furigana.as_deref() } /// returns msgid for the current word_class or None if no word_class is set pub fn word_class(&self) -> Option<&'static str> { let main_morph = self.get_main_morpheme(); let main_morph_wc = main_morph.word_class; if main_morph_wc.is_symbol() && !self.main_lexeme().is_symbol() { return Some("Undetected"); } Some(match main_morph_wc { WordClass::Particle(_) => "Particle", WordClass::Verb(_) => "Verb", WordClass::Adjective(_) => "Adjective", WordClass::Adverb => "Adverb", WordClass::Noun(_) => "Noun", WordClass::Pronoun => "Pronoun", WordClass::Interjection => "Interjection", WordClass::Symbol => "Symbol", WordClass::Conjungtion => "Conjungtion", 
WordClass::Suffix => "Suffix", WordClass::Prefix => "Prefix", WordClass::PreNoun => "Pre-noun", WordClass::Space => "Space", }) } pub fn word_class_raw(&self) -> &WordClass<'_> { &self.get_main_morpheme().word_class } /// Gets wordclass in lowercase pub fn word_class_lower(&self) -> Option { self.word_class().map(|i| i.to_lowercase()) } /// Returns the morpheme containing the actual 'word' without any inflections fn get_main_morpheme(&self) -> &OwnedMorpheme { &self.morphemes[0] } /// Gets the main lexeme. Falls back on surface if lexeme is empty fn main_lexeme(&self) -> &str { self.get_main_morpheme().reading() } } impl<'b> FromMorphemes<'static, 'b> for Part { #[inline] fn from(parts: Vec>, pos: usize) -> Option { Self::new(parts, pos) } } /// Merges a reading with its given furigana. This is required for cases where `furi` does not /// represent he same kana reading as `src`. /// /// Example: /// src: "行った" furi: "[行|い]く" => [行|い]った fn merge_furigana(src: &str, furi: &str) -> Option { let mut out_buf = String::new(); // All Kanji parts // let mut kanji_furis = furigana::parse::from_str(furi) let furi = Furigana(furi); let mut kanji_furis = furi .segments() // .filter_map(|i| i.as_ref().map(|i| i.is_kanji()).unwrap_or(false).then(|| i)) .filter(|i| i.is_kanji()) .collect::>() .into_iter(); for src_part in jp_utils::tokenize::by_alphabet(src, true) { if !src_part.is_kanji() { out_buf.push_str(src_part); continue; } let kanji_furi = kanji_furis.next()?; if src_part != *kanji_furi.as_kanji().unwrap().literals() { return None; } out_buf.push_str(&kanji_furi.encode()); } Some(out_buf) } /// Returns `true` if the given src word can be merged with the given furigana fn can_merge_furi(src: &str, furi: &str) -> bool { if !src.has_kanji() { return false; } let furigana = Furigana(furi); let kanji_furis = furigana .segments() .filter(|i| i.is_kanji()) .collect::>(); let mut kanji_furis = kanji_furis.into_iter(); for src_part in jp_utils::tokenize::by_alphabet(src, true) { 
if !src_part.is_kanji() { continue; } let kanji_furi = match kanji_furis.next() { Some(v) => v, None => return false, }; if src_part != *kanji_furi.as_kanji().unwrap().literals() { return false; } } true } impl Into for Part { #[inline] fn into(self) -> SentencePart { let furigana = self.furigana().map(|i| i.to_string()); let position = self.pos(); let inflected = self.get_inflected(); let word_class = self.word_class(); SentencePart::new(furigana, position, inflected, word_class) } } /// Converts WordClass to simple part of speech pub fn wc_to_simple_pos(wc: &WordClass) -> Option { Some(match wc { WordClass::Particle(_) => PosSimple::Particle, WordClass::Verb(_) => PosSimple::Verb, WordClass::Adjective(_) => PosSimple::Adjective, WordClass::Adverb => PosSimple::Adverb, WordClass::Noun(_) => PosSimple::Noun, WordClass::Pronoun => PosSimple::Pronoun, WordClass::Interjection => PosSimple::Interjection, WordClass::Conjungtion => PosSimple::Conjunction, WordClass::Suffix => PosSimple::Suffix, WordClass::Prefix => PosSimple::Prefix, _ => return None, }) } ================================================ FILE: lib/types/Cargo.toml ================================================ [package] name = "types" version = "0.1.0" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] japanese = { path = "../japanese", optional = true} localization = { path = "../localization", optional = true } ### Note: This sub-crate is not allowed to have dependencies to other Jotoba crates, unless its only used if `jotoba_intern` is enabled. 
jp_inflections = { git = "https://github.com/JojiiOfficial/Japanese_Inflections", optional=true } #jp_inflections = { path ="../../../jp_inflections", optional=true} jp_utils = { git = "https://github.com/JojiiOfficial/jp_utils", features = ["furigana"] } strum = { version = "0.25.0", features = ["derive"] } strum_macros = "0.25.1" serde = { version = "1.0.171", features = ["derive"] } bitflags = { git = "https://github.com/JojiiOfficial/BitFlags" } itertools = "0.11.0" [features] default = ["api"] # This feature adds stuff required for Jotoba to work but not necessarily for extern crates, so its made optional jotoba_intern = ["localization", "api", "jp_inflections", "japanese"] # Contains API types, and can be used as rust wrapper around the Jotoba API api = [] raw_types = [] [dev-dependencies] test-case = "3.1.0" ================================================ FILE: lib/types/src/api/app/completions/mod.rs ================================================ use crate::jotoba::search::SearchTarget; use serde::{Deserialize, Serialize}; /// Request payload structure for suggestion endpoint #[derive(Deserialize, Debug)] pub struct Request { /// The search query to find suggestions for pub input: String, /// The user configured language #[serde(default)] pub lang: String, /// The search type the input is designed for #[serde(default)] #[serde(rename = "search_type")] pub search_target: SearchTarget, #[serde(default)] pub radicals: Vec, #[serde(default)] pub hashtag: bool, } /// Response struct for suggestion endpoint #[derive(Serialize, Deserialize, Default)] pub struct Response { pub suggestions: Vec, pub suggestion_type: SuggestionType, } impl Response { #[inline] pub fn new(suggestions: Vec) -> Self { Self { suggestions, suggestion_type: SuggestionType::Default, } } #[inline] pub fn with_type(suggestions: Vec, suggestion_type: SuggestionType) -> Self { Self { suggestions, suggestion_type, } } } /// The type of suggestion. 
`Default` in most cases #[derive(Deserialize, Serialize, Default)] #[serde(rename_all = "snake_case")] pub enum SuggestionType { /// Default suggestion type #[default] Default, /// Special suggestion type for kanji readings KanjiReading, /// Hash tag suggestions Hashtag, } /// A word with kana and kanji reading used within [`SuggestionResponse`] #[derive(Serialize, Deserialize, Default, PartialEq, Eq, Debug, Hash, Clone)] pub struct WordPair { pub primary: String, #[serde(skip_serializing_if = "Option::is_none")] pub secondary: Option, } #[cfg(feature = "jotoba_intern")] impl WordPair { #[inline] pub fn new(primary: String) -> Self { Self { primary, secondary: None, } } #[inline] pub fn with_secondary(primary: String, secondary: String) -> Self { Self { primary, secondary: Some(secondary), } } /// Returns true if [`self`] contains [`reading`] #[inline] pub fn has_reading(&self, reading: &str) -> bool { self.primary == reading || self .secondary .as_ref() .map(|i| i == reading) .unwrap_or_default() } #[inline] pub fn secondary_preferred(&self) -> &String { self.secondary.as_ref().unwrap_or(&self.primary) } } #[cfg(feature = "jotoba_intern")] impl From<&crate::jotoba::words::Word> for WordPair { #[inline] fn from(word: &crate::jotoba::words::Word) -> Self { let main_reading = word.get_reading().reading.to_owned(); if word.reading.kanji.is_some() { WordPair { secondary: Some(main_reading), primary: word.reading.kana.reading.clone(), } } else { WordPair { primary: main_reading, secondary: None, } } } } ================================================ FILE: lib/types/src/api/app/details/mod.rs ================================================ pub mod query; pub mod sentence; pub mod word; ================================================ FILE: lib/types/src/api/app/details/query.rs ================================================ use crate::{ api::app::deserialize_lang, jotoba::language::{LangParam, Language}, }; use serde::Deserialize; #[derive(Deserialize)] pub struct 
DetailsPayload { pub sequence: u32, #[serde(deserialize_with = "deserialize_lang")] pub language: Language, pub show_english: bool, } impl DetailsPayload { #[inline] pub fn lang_param(&self) -> LangParam { LangParam::with_en_raw(self.language, self.show_english) } } ================================================ FILE: lib/types/src/api/app/details/sentence.rs ================================================ use serde::{Deserialize, Serialize}; use crate::api::app::search::responses::{kanji::Kanji, sentences::Sentence, words::Word}; #[derive(Serialize, Deserialize)] pub struct Details { sentence: Sentence, #[serde(skip_serializing_if = "Vec::is_empty")] words: Vec, #[serde(skip_serializing_if = "Vec::is_empty")] kanji: Vec, } impl Details { pub fn new(sentence: Sentence, words: Vec, kanji: Vec) -> Self { Self { sentence, words, kanji, } } } ================================================ FILE: lib/types/src/api/app/details/word.rs ================================================ use serde::Serialize; use crate::{ api::{app::search::responses::kanji::Kanji, app::search::responses::words::Word}, jotoba::words::inflection::Inflections, }; #[derive(Serialize)] pub struct Details { word: Word, #[serde(skip_serializing_if = "Vec::is_empty")] kanji: Vec, #[serde(skip_serializing_if = "Option::is_none")] conjugations: Option, #[serde(skip_serializing_if = "Vec::is_empty")] collocations: Vec, has_sentence: bool, #[serde(skip_serializing_if = "Option::is_none")] transitivity_pair: Option, } #[derive(Serialize)] #[serde(tag = "t", content = "w")] pub enum TransitivityPair { Transitive(u32), Intransitive(u32), } impl Details { #[inline] pub fn new( word: Word, kanji: Vec, conjugations: Option, collocations: Vec, has_sentence: bool, transitivity_pair: Option, ) -> Self { Self { word, kanji, conjugations, collocations, has_sentence, transitivity_pair, } } } ================================================ FILE: lib/types/src/api/app/image/mod.rs 
================================================ use serde::{Deserialize, Serialize}; /// Scan endpoint response #[derive(Serialize, Deserialize)] pub struct Response { pub text: String, } /// Scan endpoint request #[derive(Deserialize)] pub struct Request { /// The min amount of confidence the image scan resulted in. Everything below will be treated /// as fail #[serde(default = "default_conf_threshold")] pub threshold: i32, } /// Default mit threshold value for detection confidence #[inline] fn default_conf_threshold() -> i32 { 55 } ================================================ FILE: lib/types/src/api/app/kanji/ids_tree.rs ================================================ use serde::{Deserialize, Serialize}; #[derive(Deserialize, Serialize)] pub struct Request { pub literal: char, pub full: bool, } #[derive(Deserialize, Serialize)] pub struct Response { tree: OutObject, has_big: bool, } impl Response { pub fn new(tree: OutObject, has_big: bool) -> Self { Self { tree, has_big } } } #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct OutObject { name: char, literal_available: bool, #[serde(skip_serializing_if = "Vec::is_empty")] children: Vec, } impl OutObject { #[inline] pub fn new(name: char) -> Self { Self { name, children: vec![], literal_available: false, } } #[inline] pub fn with_children(name: char, children: Vec) -> Self { Self { name, children, literal_available: false, } } #[inline] pub fn add_child(&mut self, child: Self) { self.children.push(child) } #[inline] pub fn set_literal_available(&mut self, literal_available: bool) { self.literal_available = literal_available; } } ================================================ FILE: lib/types/src/api/app/kanji/mod.rs ================================================ pub mod ids_tree; ================================================ FILE: lib/types/src/api/app/mod.rs ================================================ pub mod completions; pub mod details; pub mod image; pub mod kanji; pub mod 
news;
pub mod radical;
pub mod search;

use crate::jotoba::language::Language;
use serde::{Deserialize, Deserializer};
use std::str::FromStr;

/// Deserializes a field into an `Option<Language>`. `None` if the lang-str is
/// invalid or empty; an error only if deserializing the string itself failed.
#[inline]
pub fn deserialize_lang_option<'de, D>(s: D) -> Result<Option<Language>, D::Error>
where
    D: Deserializer<'de>,
{
    let s = String::deserialize(s)?;
    if s.trim().is_empty() {
        return Ok(None);
    }
    // Invalid language strings map to `None` instead of an error.
    Ok(Language::from_str(&s).ok())
}

/// Deserializes a field into a `Language`, falling back to the default
/// language if the lang-str is invalid or empty.
#[inline]
pub fn deserialize_lang<'de, D>(s: D) -> Result<Language, D::Error>
where
    D: Deserializer<'de>,
{
    Ok(Language::from_str(&String::deserialize(s)?).unwrap_or_default())
}

================================================
FILE: lib/types/src/api/app/news/long.rs
================================================
use serde::{Deserialize, Serialize};

use super::NewsEntry;

/// Request for a single full news entry.
#[derive(Deserialize)]
pub struct Request {
    pub id: u32,
}

#[derive(Serialize, Deserialize)]
pub struct Response {
    pub entry: NewsEntry,
}

================================================
FILE: lib/types/src/api/app/news/mod.rs
================================================
pub mod long;
pub mod short;

use serde::{Deserialize, Serialize};

/// A single news entry.
#[derive(Serialize, Deserialize, Clone)]
pub struct NewsEntry {
    pub id: u32,
    pub title: String,
    pub html: String,
    pub creation_time: u64,
    // `true` if `html` was trimmed to a preview (see the short endpoint).
    pub trimmed: bool,
}

================================================
FILE: lib/types/src/api/app/news/short.rs
================================================
use serde::{Deserialize, Serialize};

use super::NewsEntry;

/// Request for all news entries after a given timestamp.
#[derive(Deserialize)]
pub struct Request {
    pub after: u64,
}

#[derive(Serialize, Deserialize)]
pub struct Response {
    // NOTE(review): element type was lost in extraction; `Vec<NewsEntry>`
    // restored from the `super::NewsEntry` import above.
    pub entries: Vec<NewsEntry>,
}

================================================
FILE: lib/types/src/api/app/radical/find_kanji.rs
================================================
use serde::{Deserialize, Serialize};
use
std::collections::HashMap; /// Request struct for kanji_by_radicals endpoint #[derive(Deserialize)] pub struct Request { pub radicals: Vec, } /// Response struct for kanji_by_radicals endpoint #[derive(Serialize, Deserialize)] pub struct Response { pub kanji: HashMap>, pub possible_radicals: HashMap>, } ================================================ FILE: lib/types/src/api/app/radical/mod.rs ================================================ pub mod find_kanji; pub mod search; ================================================ FILE: lib/types/src/api/app/radical/search.rs ================================================ use serde::{Deserialize, Serialize}; use std::collections::{BTreeSet, HashMap}; /// Request struct for kanji_by_radicals endpoint #[derive(Deserialize)] pub struct Request { pub query: String, } /// Response struct for kanji_by_radicals endpoint #[derive(Serialize, Deserialize, Default)] pub struct Response { pub radicals: HashMap>, pub kanji: Vec, } /// Kanji literal with radicals #[derive(Serialize, Deserialize, Default, PartialEq, Eq)] pub struct KanjiRads { pub kanji: char, pub rads: HashMap>, } impl KanjiRads { #[inline] pub fn new(kanji: char, rads: HashMap>) -> Self { Self { kanji, rads } } } ================================================ FILE: lib/types/src/api/app/search/mod.rs ================================================ pub mod query; pub mod responses; ================================================ FILE: lib/types/src/api/app/search/query.rs ================================================ use crate::{ api::app::{deserialize_lang, deserialize_lang_option}, jotoba::language::{LangParam, Language}, }; use serde::Deserialize; #[derive(Debug, Clone, Deserialize)] pub struct SearchPayload { pub settings: UserSettings, /// Searched query text pub query_str: String, /// Result page #[serde(default)] pub page: Option, /// Index in sentence reader #[serde(default)] pub word_index: Option, /// Overwrite #[serde(default, deserialize_with = 
"deserialize_lang_option")]
    pub lang_overwrite: Option<Language>,
}

impl SearchPayload {
    /// Returns language parameters for the query
    #[inline]
    pub fn lang_param(&self) -> LangParam {
        self.settings.lang_param()
    }
}

/// APP settings
#[derive(Debug, Clone, Copy, Deserialize)]
pub struct UserSettings {
    #[serde(deserialize_with = "deserialize_lang")]
    pub user_lang: Language,
    pub show_english: bool,
    pub page_size: u32,
    pub show_example_sentences: bool,
    pub sentence_furigana: bool,
}

impl UserSettings {
    /// Returns language parameters for user settings
    #[inline]
    pub fn lang_param(&self) -> LangParam {
        LangParam::with_en_raw(self.user_lang, self.show_english)
    }
}

// ================================================
// FILE: lib/types/src/api/app/search/responses/k_compounds.rs
// ================================================

use serde::{Deserialize, Serialize};

/// Response for kanji compound request
#[derive(Deserialize, Serialize)]
pub struct CompoundResponse {
    pub compounds: Vec<CompoundSet>,
}

impl CompoundResponse {
    #[inline]
    pub fn new(compounds: Vec<CompoundSet>) -> Self {
        Self { compounds }
    }
}

/// Set of compounds for a single kanji
#[derive(Deserialize, Serialize)]
pub struct CompoundSet {
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub on: Vec<CompoundWord>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub kun: Vec<CompoundWord>,
}

impl CompoundSet {
    #[inline]
    pub fn new(on: Vec<CompoundWord>, kun: Vec<CompoundWord>) -> Self {
        Self { on, kun }
    }
}

/// A word used in kanji compounds
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CompoundWord {
    pub jp: String,
    pub kana: String,
    pub translations: Vec<String>,
}

impl CompoundWord {
    /// Create a new CompoundWord
    pub fn new(jp: String, kana: String, translations: Vec<String>) -> Self {
        Self {
            jp,
            kana,
            translations,
        }
    }

    /// Converts a Word to a CompoundWord. Takes ALL senses and ALL glosses. If you only want
    /// some of the glosses, filter them first
    pub fn from_word(word: &crate::jotoba::words::Word) -> Self {
        let jp = word.get_reading().reading.clone();
        let kana = word.reading.kana.reading.clone();
        // `flat_map` instead of `map(..).flatten()` — same behavior, idiomatic.
        let translations = word
            .senses
            .iter()
            .flat_map(|i| i.glosses.clone())
            .map(|i| i.gloss)
            .collect::<Vec<String>>();
        Self::new(jp, kana, translations)
    }
}

// ================================================
// FILE: lib/types/src/api/app/search/responses/kanji.rs
// ================================================

use serde::{Deserialize, Serialize};

use crate::jotoba::kanji::radical::DetailedRadical;

/// Kanji API response. Contains all kanji
#[derive(Clone, Debug, Serialize)]
pub struct KanjiResponse {
    kanji: Vec<Kanji>,
}

impl KanjiResponse {
    #[inline]
    pub fn new(kanji: Vec<Kanji>) -> Self {
        Self { kanji }
    }
}

/// Kanji information
// NOTE(review): scalar generics (Option<u8>/Option<u16>) and Vec element
// types were stripped by extraction and reconstructed — confirm upstream.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Kanji {
    pub literal: char,
    pub stroke_count: u8,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub grade: Option<u8>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub frequency: Option<u16>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub jlpt: Option<u8>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub onyomi: Vec<String>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub kunyomi: Vec<String>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub variant: Vec<String>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub chinese: Vec<String>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub korean_romaji: Vec<String>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub korean_hangul: Vec<String>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub nanori: Vec<String>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub similar_kanji: Vec<char>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub meanings: Vec<String>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub parts: Vec<char>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub vietnamese: Vec<String>,
    pub has_compounds: bool,
    pub radical: DetailedRadical,
}

impl From<crate::jotoba::kanji::Kanji> for Kanji {
    #[inline]
    fn from(k:
crate::jotoba::kanji::Kanji) -> Self {
        // A kanji has compounds iff it has on- or kun-compound dictionary refs.
        let has_compounds = !k.on_dicts.is_empty() || !k.kun_dicts.is_empty();
        Self {
            literal: k.literal,
            stroke_count: k.stroke_count,
            grade: k.grade,
            frequency: k.frequency,
            jlpt: k.jlpt,
            onyomi: k.onyomi,
            kunyomi: k.kunyomi,
            variant: k.variant,
            chinese: k.chinese,
            korean_romaji: k.korean_r,
            korean_hangul: k.korean_h,
            nanori: k.nanori,
            similar_kanji: k.similar_kanji,
            meanings: k.meanings,
            parts: k.parts,
            radical: k.radical,
            vietnamese: k.vietnamese,
            has_compounds,
        }
    }
}

// ================================================
// FILE: lib/types/src/api/app/search/responses/mod.rs
// ================================================

pub mod k_compounds;
pub mod kanji;
pub mod names;
pub mod sentences;
pub mod words;

use serde::Serialize;

use crate::jotoba::{pagination::page::Page, search::help::SearchHelp};

// NOTE(review): `Page`'s type parameter was stripped by extraction;
// reconstructed as a generic `T: Serialize` — confirm upstream.
#[derive(Serialize)]
pub struct Response<T: Serialize> {
    #[serde(flatten)]
    inner: Page<T>,
    #[serde(skip_serializing_if = "Option::is_none")]
    search_help: Option<SearchHelp>,
}

impl<T: Serialize> Response<T> {
    pub fn new(inner: Page<T>) -> Self {
        Self {
            inner,
            search_help: None,
        }
    }

    pub fn with_help(inner: Page<T>, search_help: SearchHelp) -> Self {
        Self {
            inner,
            search_help: Some(search_help),
        }
    }

    pub fn with_help_fn<S>(inner: Page<T>, help_fn: S) -> Self
    where
        S: Fn(&Page<T>) -> Option<SearchHelp>,
    {
        Self {
            search_help: help_fn(&inner),
            inner,
        }
    }

    pub fn set_search_help(&mut self, search_help: SearchHelp) -> &mut Self {
        self.search_help = Some(search_help);
        self
    }
}

// ================================================
// FILE: lib/types/src/api/app/search/responses/names.rs
// ================================================

use serde::Serialize;

use crate::jotoba::names::Name;

/// Names API response. Contains all Names
#[derive(Clone, Debug, Serialize)]
pub struct Response {
    names: Vec<Name>,
}

impl Response {
    #[inline]
    pub fn new(names: Vec<Name>) -> Self {
        Self { names }
    }
}

// ================================================
// FILE: lib/types/src/api/app/search/responses/sentences.rs
// ================================================

use serde::{Deserialize, Serialize};

/// Sentences API response. Contains all sentences
#[derive(Serialize, Deserialize, Clone)]
pub struct Response {
    sentences: Vec<Sentence>,
}

impl Response {
    #[inline]
    pub fn new(sentences: Vec<Sentence>) -> Self {
        Self { sentences }
    }
}

#[derive(Serialize, Deserialize, Clone)]
pub struct Sentence {
    sequence: u32,
    content: String,
    translation: String,
}

impl Sentence {
    /// Create a new sentence
    #[inline]
    pub fn new(sequence: u32, content: String, translation: String) -> Self {
        Self {
            sequence,
            content,
            translation,
        }
    }
}

// ================================================
// FILE: lib/types/src/api/app/search/responses/words/inflection.rs
// ================================================

use serde::Serialize;

use crate::jotoba::words::inflection::Inflection;

#[derive(Clone, Serialize)]
pub struct InflectionInfo {
    #[serde(skip_serializing_if = "Vec::is_empty")]
    inflections: Vec<Inflection>,
    /// The "uninflected" version
    lexeme: String,
}

impl InflectionInfo {
    /// Create a new InflectionInfo
    #[inline]
    pub fn new(inflection: Vec<Inflection>, lexeme: String) -> Self {
        Self {
            inflections: inflection,
            lexeme,
        }
    }
}

// ================================================
// FILE: lib/types/src/api/app/search/responses/words/mod.rs
// ================================================

mod inflection;
mod sentence;
mod word;

pub use inflection::*;
pub use sentence::*;
pub use word::*;

use super::kanji::Kanji;
use serde::Serialize;

/// A word search response
#[derive(Clone, Serialize)]
pub struct Response {
    /// All word results for the current search
    words: Vec<Word>,
    /// Several kanji for the given words
    kanji: Vec<Kanji>,
    /// Parsed number from query
    #[serde(skip_serializing_if = "Option::is_none")]
number: Option, /// Inflection information of the current word #[serde(skip_serializing_if = "Option::is_none")] infl_info: Option, /// Sentence reader data #[serde(skip_serializing_if = "Option::is_none")] sentence: Option, /// Query that has actually been used for search original_query: String, } impl Response { /// Create a new Response pub fn new( words: Vec, kanji: Vec, infl_info: Option, sentence: Option, original_query: String, number: Option, ) -> Self { Self { words, kanji, infl_info, sentence, original_query, number, } } } ================================================ FILE: lib/types/src/api/app/search/responses/words/sentence.rs ================================================ use serde::Serialize; #[derive(Clone, Serialize)] pub struct Sentence { /// Currently selected part curr_index: usize, /// All Parts of the sentence parts: Vec, } impl Sentence { #[inline] pub fn new(curr_index: usize, parts: Vec) -> Self { Self { curr_index, parts } } } #[derive(Clone, Serialize)] pub struct SentencePart { /// Original inflected word inflected: String, /// Furigana of the inflected word. 
None if can't be /// calculated or word is completetly in kana #[serde(skip_serializing_if = "Option::is_none")] furigana: Option, /// Position of the sentence_part in the sentence position: usize, /// Part of Speech #[serde(skip_serializing_if = "Option::is_none")] word_class: Option<&'static str>, } impl SentencePart { #[inline] pub fn new( furigana: Option, position: usize, inflected: String, word_class: Option<&'static str>, ) -> Self { Self { furigana, position, inflected, word_class, } } } ================================================ FILE: lib/types/src/api/app/search/responses/words/word.rs ================================================ use serde::{Deserialize, Serialize}; use crate::jotoba::{ language::Language, words::{ dialect::Dialect, field::Field, misc::Misc, part_of_speech::PartOfSpeech, pitch::Pitch, sense::Gairaigo, }, }; /// A single word item #[derive(Clone, Serialize, Deserialize)] pub struct Word { pub sequence: u32, pub is_common: bool, pub reading: String, #[serde(skip_serializing_if = "Vec::is_empty")] pub alt_readings: Vec, pub senses: Vec, #[serde(skip_serializing_if = "Option::is_none")] pub audio: Option, #[serde(skip_serializing_if = "Vec::is_empty")] pub accents: Vec, #[serde(skip_serializing_if = "Option::is_none")] pub furigana: Option, #[serde(skip_serializing_if = "Option::is_none")] pub jlpt_lvl: Option, #[serde(skip_serializing_if = "Option::is_none")] pub transive_version: Option, #[serde(skip_serializing_if = "Option::is_none")] pub intransive_version: Option, pub sentences_available: u16, } #[derive(Clone, Serialize, Deserialize)] pub struct Sense { #[serde(skip_serializing_if = "Option::is_none")] pub misc: Option, #[serde(skip_serializing_if = "Option::is_none")] pub field: Option, #[serde(skip_serializing_if = "Option::is_none")] pub dialect: Option, pub glosses: Vec, #[serde(skip_serializing_if = "Option::is_none")] pub xref: Option, #[serde(skip_serializing_if = "Option::is_none")] pub antonym: Option, 
#[serde(skip_serializing_if = "Option::is_none")] pub information: Option, pub part_of_speech: Vec, pub language: Language, #[serde(skip_serializing_if = "Option::is_none")] pub example_sentence: Option<(String, String)>, #[serde(skip_serializing_if = "Option::is_none")] pub gairaigo: Option, } ================================================ FILE: lib/types/src/api/internal/info/mod.rs ================================================ pub mod words; ================================================ FILE: lib/types/src/api/internal/info/words.rs ================================================ use crate::{ api::app::deserialize_lang, jotoba::{ language::{LangParam, Language}, sentences::Sentence, words::{part_of_speech::PosSimple, Word}, }, }; use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug)] pub struct Request { pub ids: Vec, #[serde(deserialize_with = "deserialize_lang")] pub language: Language, pub show_english: bool, } impl Request { #[inline] pub fn new(ids: Vec, language: Language, show_english: bool) -> Self { Self { ids, language, show_english, } } #[inline] pub fn lang_param(&self) -> LangParam { LangParam::with_en_raw(self.language, self.show_english) } } #[derive(Serialize, Deserialize)] pub struct Response { pub items: Vec, } #[derive(Serialize, Deserialize)] pub struct WordItem { pub word: Word, pub sentences: Vec, pub audio: Option, pub pos: Vec, } impl WordItem { pub fn new( word: Word, sentences: Vec, audio: Option, pos: Vec, ) -> Self { Self { word, sentences, audio, pos, } } } impl Response { #[inline] pub fn new(items: Vec) -> Self { Self { items } } } ================================================ FILE: lib/types/src/api/internal/mod.rs ================================================ pub mod info; ================================================ FILE: lib/types/src/api/mod.rs ================================================ pub mod app; pub mod internal; pub mod search; 
================================================ FILE: lib/types/src/api/search/kanji.rs ================================================ use std::path::Path; use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize)] pub struct Response { pub kanji: Vec, } #[derive(Serialize, Deserialize)] pub struct Kanji { literal: String, meanings: Vec, #[serde(skip_serializing_if = "Option::is_none")] grade: Option, stroke_count: u8, #[serde(skip_serializing_if = "Option::is_none")] frequency: Option, #[serde(skip_serializing_if = "Option::is_none")] jlpt: Option, #[serde(skip_serializing_if = "Vec::is_empty")] variant: Vec, #[serde(skip_serializing_if = "Vec::is_empty")] onyomi: Vec, #[serde(skip_serializing_if = "Vec::is_empty")] kunyomi: Vec, #[serde(skip_serializing_if = "Vec::is_empty")] chinese: Vec, #[serde(skip_serializing_if = "Vec::is_empty")] korean_r: Vec, #[serde(skip_serializing_if = "Vec::is_empty")] korean_h: Vec, #[serde(skip_serializing_if = "Vec::is_empty")] parts: Vec, radical: String, #[serde(skip_serializing_if = "Option::is_none")] stroke_frames: Option, } impl Kanji { pub fn from>(kanji: &crate::jotoba::kanji::Kanji, assets_path: P) -> Self { let frames = kanji .has_stroke_frames(assets_path) .then(|| kanji.get_stroke_frames_url()); Self { literal: kanji.literal.to_string(), meanings: kanji.meanings.clone(), grade: kanji.grade, stroke_count: kanji.stroke_count, frequency: kanji.frequency, jlpt: kanji.jlpt, variant: kanji.variant.clone(), onyomi: kanji.onyomi.clone(), kunyomi: kanji.kunyomi.clone(), chinese: kanji.chinese.clone(), korean_r: kanji.korean_r.clone(), korean_h: kanji.korean_h.clone(), parts: kanji.parts.iter().map(|i| i.to_string()).collect(), radical: kanji.radical.literal.to_string(), stroke_frames: frames, } } } ================================================ FILE: lib/types/src/api/search/mod.rs ================================================ pub mod kanji; pub mod name; pub mod sentence; pub mod word; use 
serde::Deserialize;

use crate::jotoba::language::Language;

/// A Search API payload
#[derive(Deserialize)]
pub struct SearchRequest {
    #[serde(rename = "query")]
    pub query_str: String,
    #[serde(default)]
    pub language: Language,
    #[serde(default)]
    pub no_english: bool,
}

// ================================================
// FILE: lib/types/src/api/search/name.rs
// ================================================

use serde::{Deserialize, Serialize};

use crate::jotoba::names::name_type::NameType;

#[derive(Serialize, Deserialize)]
pub struct Response {
    names: Vec<Name>,
}

#[derive(Serialize, Deserialize)]
pub struct Name {
    pub kana: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub kanji: Option<String>,
    pub transcription: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name_type: Option<Vec<NameType>>,
}

impl From<&crate::jotoba::names::Name> for Name {
    #[inline]
    fn from(name: &crate::jotoba::names::Name) -> Self {
        Self {
            kana: name.kana.clone(),
            kanji: name.kanji.clone(),
            transcription: name.transcription.clone(),
            name_type: name.name_type.clone(),
        }
    }
}

impl From<Vec<&crate::jotoba::names::Name>> for Response {
    #[inline]
    fn from(name: Vec<&crate::jotoba::names::Name>) -> Self {
        let names: Vec<Name> = name.into_iter().map(Name::from).collect();
        Self { names }
    }
}

// ================================================
// FILE: lib/types/src/api/search/sentence.rs
// ================================================

use serde::{Deserialize, Serialize};

use crate::jotoba::language::Language;

#[derive(Serialize, Deserialize)]
pub struct Response {
    sentences: Vec<Sentence>,
}

#[derive(Serialize, Deserialize)]
pub struct Sentence {
    pub content: String,
    pub furigana: String,
    pub translation: String,
    pub language: Language,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub eng: Option<String>,
}

impl From<Vec<Sentence>> for Response {
    #[inline]
    fn from(sentences: Vec<Sentence>) -> Self {
        Self { sentences }
    }
}

// ================================================
// FILE: lib/types/src/api/search/word.rs
// ================================================

use std::path::Path;

use
crate::{ api::search::kanji::Kanji, jotoba::{ language::Language, words::{ dialect::Dialect, field::Field, misc::Misc, part_of_speech::PartOfSpeech, pitch::PitchPart, }, }, }; use serde::{Deserialize, Serialize}; /// The API response struct for a word search #[derive(Serialize, Deserialize)] pub struct Response { kanji: Vec, words: Vec, } impl Response { pub fn new(words: Vec, kanji: Vec) -> Self { Self { kanji, words } } #[cfg(feature = "jotoba_intern")] pub fn from>( wres: ( Vec<&crate::jotoba::words::Word>, Vec<&crate::jotoba::kanji::Kanji>, ), assets_path: P, ) -> Self { let kanji = convert_kanji(wres.1, assets_path); let words = convert_words(wres.0); Self { kanji, words } } } /// Represents a single Word result with 1 (main) Japanese reading and n glosses #[derive(Serialize, Deserialize)] pub struct Word { reading: Reading, common: bool, senses: Vec, #[serde(skip_serializing_if = "Option::is_none")] alt_readings: Option>, #[serde(skip_serializing_if = "Option::is_none")] audio: Option, #[serde(skip_serializing_if = "Option::is_none")] pitch: Option>, } #[derive(Serialize, Deserialize)] pub struct Reading { kana: String, #[serde(skip_serializing_if = "Option::is_none")] kanji: Option, #[serde(skip_serializing_if = "Option::is_none")] furigana: Option, } #[derive(Serialize, Deserialize)] pub struct Sense { glosses: Vec, pos: Vec, language: Language, #[serde(skip_serializing_if = "Option::is_none")] dialect: Option, #[serde(skip_serializing_if = "Option::is_none")] field: Option, #[serde(skip_serializing_if = "Option::is_none")] information: Option, #[serde(skip_serializing_if = "Option::is_none")] antonym: Option, #[serde(skip_serializing_if = "Option::is_none")] misc: Option, #[serde(skip_serializing_if = "Option::is_none")] xref: Option, } impl From<&crate::jotoba::words::sense::Sense> for Sense { fn from(sense: &crate::jotoba::words::sense::Sense) -> Self { let pos = sense.part_of_speech.clone(); let glosses = sense .glosses .iter() .map(|i| i.gloss.clone()) 
.collect::>(); Self { glosses, pos, language: sense.language, dialect: sense.dialect, field: sense.field, information: sense.information.as_ref().cloned(), antonym: sense.antonym.as_ref().cloned(), misc: sense.misc, xref: sense.xref.as_ref().cloned(), } } } #[cfg(feature = "jotoba_intern")] impl From<&crate::jotoba::words::Word> for Word { #[inline] fn from(word: &crate::jotoba::words::Word) -> Self { let kanji = word.reading.kanji.as_ref().map(|i| i.reading.clone()); let kana = word.reading.kana.clone().reading; let furigana = word.furigana.clone(); let senses = word.senses.iter().map(|i| Sense::from(i)).collect(); let pitch = word.get_first_pitch().map(|i| i.parts.clone()); Self { common: word.is_common(), reading: Reading { kanji, kana, furigana, }, senses, alt_readings: None, audio: word.audio_file_name(), pitch, } } } #[cfg(feature = "jotoba_intern")] #[inline] fn convert_kanji>( wres: Vec<&crate::jotoba::kanji::Kanji>, assets_path: P, ) -> Vec { wres.into_iter() .map(|i| Kanji::from(i, assets_path.as_ref())) .collect() } #[cfg(feature = "jotoba_intern")] #[inline] fn convert_words(wres: Vec<&crate::jotoba::words::Word>) -> Vec { wres.into_iter().map(|i| i.into()).collect() } ================================================ FILE: lib/types/src/jotoba/indexes/hashtag.rs ================================================ use std::str::FromStr; use crate::jotoba::search::SearchTarget; use serde::{Deserialize, Serialize}; #[derive(Debug, Serialize, Deserialize)] pub struct RawHashtag { pub tag: String, pub s_targets: Vec, pub freq: f32, } impl RawHashtag { pub fn new(tag: String, s_targets: Vec, freq: f32) -> Self { Self { tag, s_targets, freq, } } } impl FromStr for RawHashtag { type Err = (); fn from_str(s: &str) -> Result { if s.is_empty() { return Err(()); } let mut split = s.trim().split(' '); let tag = split.next().ok_or(())?.to_string(); let freq = split.next().and_then(|i| i.parse::().ok()).ok_or(())?; let s_targets = split .map(|o| { o.parse::() .ok() 
.and_then(|i| SearchTarget::try_from(i).ok()) .unwrap() }) .collect::>(); Ok(RawHashtag::new(tag, s_targets, freq)) } } ================================================ FILE: lib/types/src/jotoba/indexes/mod.rs ================================================ pub mod hashtag; ================================================ FILE: lib/types/src/jotoba/kanji/mod.rs ================================================ pub mod radical; pub mod reading; use self::{ radical::DetailedRadical, reading::{Reading, ReadingType}, }; use serde::{Deserialize, Serialize}; use std::{ char, path::{Path, PathBuf}, }; /// A Kanji representing structure containing all available information about a single kanji /// character. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Kanji { pub literal: char, pub grade: Option, pub stroke_count: u8, pub frequency: Option, pub jlpt: Option, pub variant: Vec, pub onyomi: Vec, /// Japanese name readings pub nanori: Vec, pub kunyomi: Vec, pub chinese: Vec, pub korean_r: Vec, pub korean_h: Vec, pub vietnamese: Vec, pub kun_dicts: Vec, pub on_dicts: Vec, pub similar_kanji: Vec, pub meanings: Vec, pub radical: DetailedRadical, pub parts: Vec, } impl Kanji { /// Returns the `ReadingType` of `reading` within readings of a kanji pub fn get_reading_type(&self, reading: &str) -> Option { let in_on = self.in_on_reading(reading); let in_kun = self.in_kun_reading(reading); if in_on && !in_kun { return Some(ReadingType::Onyomi); } else if !in_on && in_kun { return Some(ReadingType::Kunyomi); } None } /// Returns `true` if the kanji has `reading` within the `kunyomi` #[inline] pub fn in_kun_reading(&self, reading: &str) -> bool { self.kunyomi.iter().any(|i| i.as_str() == reading) } /// Returns `true` if the kanji has `reading` within the `onyomi` #[inline] pub fn in_on_reading(&self, reading: &str) -> bool { self.onyomi.iter().any(|i| i.as_str() == reading) } /// Tries to find the given reading in the kanjis readings and returns a `Reading` 
value if /// found pub fn find_reading(&self, reading: &str) -> Option { let on = self.onyomi.iter().find(|i| i == &reading); let kun = self.kunyomi.iter().find(|i| i == &reading); let r = on.or(kun)?; let rt = if on.is_some() { ReadingType::Onyomi } else { ReadingType::Kunyomi }; Some(Reading::new(rt, self.literal, r.to_string())) } /// Returns an iteratort over all readings pub fn reading_iter(&self) -> impl Iterator { self.kunyomi .iter() .chain(self.onyomi.iter()) .enumerate() .map(|i| (i.1, i.0 as u32)) } pub fn reading_from_pos(&self, pos: usize) -> Option { if pos < self.kunyomi.len() { let r = self.kunyomi.get(pos).unwrap(); Some(Reading::new( ReadingType::Kunyomi, self.literal, r.to_string(), )) } else { let k_len = self.kunyomi.len(); let r = self.onyomi.get(pos - k_len)?; Some(Reading::new( ReadingType::Onyomi, self.literal, r.to_string(), )) } } #[deprecated(note = "use find_reading instead")] #[inline] pub fn get_literal_reading(&self, reading: &str) -> Option { Some(match self.get_reading_type(reading)? 
{ ReadingType::Kunyomi => literal_kun_reading(reading), ReadingType::Onyomi => format_reading(reading), }) } /// Returns true if kanji has a given reading #[inline] pub fn has_reading(&self, reading: &str) -> bool { self.in_on_reading(reading) || self.in_kun_reading(reading) } /// Returns `true` if the kanji has stroke frames #[inline] pub fn has_stroke_frames>(&self, assets_path: P) -> bool { self.get_animation_path(assets_path).exists() } /// Returns the url to stroke-frames svg #[inline] pub fn get_stroke_frames_url(&self) -> String { format!("/assets/svg/kanji/{}_frames.svg", self.literal) } /// Returns the local path of the stroke-frames #[inline] pub fn get_stroke_frames_path>(&self, assets_path: P) -> PathBuf { let frame_path = format!("svg/kanji/{}_frames.svg", self.literal); let frame_path = Path::new(&frame_path); assets_path.as_ref().join(frame_path) //format!("html/assets/svg/kanji/{}_frames.svg", self.literal) } /// Returns the local path of the kanjis stroke-animation #[inline] pub fn get_animation_path>(&self, assets_path: P) -> PathBuf { //format!("html/assets/svg/kanji/{}.svg", self.literal) let frame_path = format!("svg/kanji/{}.svg", self.literal); let frame_path = Path::new(&frame_path); assets_path.as_ref().join(frame_path) } /// Returns `true` if the kanji has a stroke animation file #[inline] pub fn has_animation_file>(&self, assets_path: P) -> bool { //Path::new(&self.get_animation_path()).exists() self.get_animation_path(assets_path).exists() } /// Returns `true` if kanji has on or kun compounds (or both) #[inline] pub fn has_compounds(&self) -> bool { (!self.on_dicts.is_empty()) || (!self.kun_dicts.is_empty()) } } /// Formats a kun/on reading to a kana entry #[inline] pub fn format_reading(reading: &str) -> String { reading.replace('-', "").replace('.', "") } /// Returns the reading of a kanjis literal, given the kun reading #[inline] pub fn literal_kun_reading(kun: &str) -> String { kun.replace('-', 
"").split('.').next().unwrap().to_string()
}

/// Formats `literal` with `reading`, based on `ReadingType`
///
/// Example:
///
/// literal: 捗
/// reading: はかど.る
/// r_type: ReadingType::Kunyomi
/// returns: 捗る
pub fn format_reading_with_literal(literal: char, reading: &str, r_type: ReadingType) -> String {
    match r_type {
        ReadingType::Kunyomi => {
            let r = if reading.contains('.') {
                // Okurigana follows the dot; prepend the literal to it.
                let right = reading.split('.').nth(1).unwrap_or_default();
                format!("{}{}", literal, right)
            } else {
                literal.to_string()
            };
            // char pattern instead of &str pattern — same behavior.
            r.replace('-', "")
        }
        ReadingType::Onyomi => literal.to_string(),
    }
}

#[cfg(test)]
mod test {
    use super::*;

    fn reading_on1() -> Reading {
        Reading::new(ReadingType::Onyomi, '長', "ちょう".to_string())
    }

    fn reading_kun() -> Reading {
        Reading::new(ReadingType::Kunyomi, '長', "なが.い".to_string())
    }

    fn reading_kun2() -> Reading {
        Reading::new(ReadingType::Kunyomi, '車', "くるま".to_string())
    }

    fn reading_kun3() -> Reading {
        Reading::new(ReadingType::Kunyomi, '大', "-おお.いに".to_string())
    }

    #[test]
    fn test_reading() {
        let on1 = reading_on1();
        let kun1 = reading_kun();
        let kun2 = reading_kun2();
        let kun3 = reading_kun3();
        let readings = &[on1, kun1, kun2, kun3];
        let formatted = &["長", "長い", "車", "大いに"];
        for (i, r) in readings.iter().enumerate() {
            assert_eq!(r.format_reading_with_literal(), formatted[i]);
        }
    }
}

// ================================================
// FILE: lib/types/src/jotoba/kanji/radical.rs
// ================================================

use serde::{Deserialize, Serialize};

/// A single radical representing structure
// NOTE(review): generic parameters reconstructed after extraction stripped
// them — `alternative: Option<char>` is inferred, confirm upstream.
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct DetailedRadical {
    pub id: u16,
    pub literal: char,
    pub alternative: Option<char>,
    pub stroke_count: u8,
    pub readings: Vec<String>,
    pub translations: Option<Vec<String>>,
}

#[derive(Clone, Serialize, Deserialize)]
pub struct SearchRadicalInfo {
    pub literal: char,
    pub frequency: u16,
    pub meanings: Vec<String>,
}

/// Represents a radical which gets used for kanji-searches
#[derive(Debug, Clone, PartialEq)]
pub struct
SearchRadical { pub radical: char, pub stroke_count: i32, } ================================================ FILE: lib/types/src/jotoba/kanji/reading.rs ================================================ #[cfg(feature = "jotoba_intern")] use japanese::ToKanaExt; use super::Kanji; /// ReadingType of a kanji's reading. `Kunyomi` represents japanese readings and `Onyomi` /// represents original chinese readings. #[derive(Clone, Copy, Debug, PartialEq)] pub enum ReadingType { Kunyomi, Onyomi, } #[derive(Clone, Debug)] pub struct Reading { r_type: ReadingType, literal: char, inner: String, } #[cfg(feature = "jotoba_intern")] impl Reading { /// Returns a string with the reading and literal merged. If the reading is an onyomi reading, /// this is equal to the literal. For kunyomi readings this can be an example: (inner: "だま.る") => "黙る". /// This also formats the reading to hiragana pub fn format_reading_with_literal(&self) -> String { match self.r_type { ReadingType::Kunyomi => { let r = if self.inner.contains('.') { let right = self.inner.split('.').nth(1).unwrap_or_default(); format!("{}{}", self.literal, right) } else { self.literal.to_string() }; r.replace("-", "") } ReadingType::Onyomi => self.literal.to_hiragana(), } } } impl Reading { pub(crate) fn new(r_type: ReadingType, literal: char, inner: String) -> Self { Reading { r_type, literal, inner, } } /// Get the reading's r type. #[inline] pub fn get_type(&self) -> ReadingType { self.r_type } /// Get a mutable reference to the reading's literal. #[inline] pub fn get_literal(&self) -> &char { &self.literal } /// Get a reference to the reading's inner. 
#[inline] pub fn get_raw(&self) -> &str { self.inner.as_ref() } /// Returns `true` if `kanji` has this reading #[inline] pub fn matches_kanji(&self, kanji: &Kanji) -> bool { self.literal == kanji.literal && kanji.has_reading(&self.inner) } /// Returns the literal as newly allocated `String` #[inline] pub fn get_lit_str(&self) -> String { self.get_literal().to_string() } /// Returns `true` if the literal captures the entire literal #[inline] pub fn is_full_reading(&self) -> bool { !self.inner.contains('-') && !self.inner.contains('.') } } impl PartialEq for &Reading { #[inline] fn eq(&self, other: &ReadingType) -> bool { self.r_type == *other } } /// A kanji-reading search item #[derive(Debug, Clone, PartialEq, Hash)] pub struct ReadingSearch { /// The provided kanji literal pub literal: char, /// The provided kanji reading pub reading: String, } impl ReadingSearch { #[inline] pub fn new(literal: &str, reading: &str) -> Self { ReadingSearch { literal: literal.chars().next().unwrap(), reading: reading.to_string(), } } } ================================================ FILE: lib/types/src/jotoba/language/mod.rs ================================================ pub mod param; pub use param::LangParam; #[cfg(feature = "jotoba_intern")] use localization::traits::Translatable; use serde::{Deserialize, Serialize}; use std::{array::IntoIter, convert::TryFrom}; use strum_macros::{AsRefStr, Display, EnumString}; #[derive( Debug, Display, PartialEq, Eq, Clone, Copy, AsRefStr, EnumString, Hash, Deserialize, Serialize, )] #[repr(u8)] pub enum Language { #[strum(serialize = "eng", serialize = "en-US")] English, #[strum(serialize = "ger", serialize = "de-DE", serialize = "deu")] German, #[strum(serialize = "rus", serialize = "ru")] Russian, #[strum(serialize = "spa", serialize = "es-ES")] Spanish, #[strum(serialize = "swe", serialize = "sv-SE")] Swedish, #[strum(serialize = "fre", serialize = "fr-FR", serialize = "fra")] French, #[strum(serialize = "dut", serialize = "nl-NL", 
serialize = "nld")]
    Dutch,
    #[strum(serialize = "hun", serialize = "hu")]
    Hungarian,
    #[strum(serialize = "slv", serialize = "sl-SL", serialize = "svl")]
    Slovenian,
    #[strum(serialize = "jpn", serialize = "ja", serialize = "jp")]
    Japanese,
}

impl Language {
    /// Returns an iterator over all Languages
    // NOTE(review): `IntoIter`'s const-generic parameters were stripped by
    // extraction; reconstructed from the array lengths below.
    #[inline]
    pub fn iter() -> IntoIter<Language, 10> {
        [
            Language::English,
            Language::German,
            Language::Russian,
            Language::Spanish,
            Language::Swedish,
            Language::French,
            Language::Dutch,
            Language::Hungarian,
            Language::Slovenian,
            Language::Japanese,
        ]
        .into_iter()
    }

    /// Returns an iterator over all Languages which have words with this language
    #[inline]
    pub fn iter_word() -> IntoIter<Language, 9> {
        [
            Language::English,
            Language::German,
            Language::Russian,
            Language::Spanish,
            Language::Swedish,
            Language::French,
            Language::Dutch,
            Language::Hungarian,
            Language::Slovenian,
        ]
        .into_iter()
    }

    /// Returns the 3-letter code used in query parameters.
    pub fn to_query_format(&self) -> &'static str {
        match *self {
            Language::English => "eng",
            Language::German => "ger",
            Language::Russian => "rus",
            Language::Spanish => "spa",
            Language::Swedish => "swe",
            Language::French => "fre",
            Language::Dutch => "dut",
            Language::Hungarian => "hun",
            Language::Slovenian => "slv",
            Language::Japanese => "jpn",
        }
    }
}

impl Default for Language {
    #[inline]
    fn default() -> Self {
        Self::English
    }
}

impl TryFrom<i32> for Language {
    type Error = ();

    #[inline]
    fn try_from(i: i32) -> Result<Self, Self::Error> {
        Ok(match i {
            0 => Self::English,
            1 => Self::German,
            2 => Self::Russian,
            3 => Self::Spanish,
            4 => Self::Swedish,
            5 => Self::French,
            6 => Self::Dutch,
            7 => Self::Hungarian,
            8 => Self::Slovenian,
            9 => Self::Japanese,
            _ => return Err(()),
        })
    }
}

// `From` instead of a manual `Into` impl (idiomatic; `Into<i32>` is provided
// by the blanket impl, so all existing `.into()` callers keep working).
impl From<Language> for i32 {
    #[inline]
    fn from(lang: Language) -> i32 {
        match lang {
            Language::English => 0,
            Language::German => 1,
            Language::Russian => 2,
            Language::Spanish => 3,
            Language::Swedish => 4,
            Language::French => 5,
            Language::Dutch => 6,
            Language::Hungarian => 7,
            Language::Slovenian => 8,
            Language::Japanese => 9,
        }
    }
}

#[cfg(feature = "jotoba_intern")]
impl Translatable for Language {
    #[inline]
    fn get_id(&self) -> &'static
str {
        match self {
            Language::English => "English",
            Language::German => "German",
            Language::Russian => "Russian",
            Language::Spanish => "Spanish",
            Language::Swedish => "Swedish",
            Language::French => "French",
            Language::Dutch => "Dutch",
            Language::Hungarian => "Hungarian",
            Language::Slovenian => "Slovenian",
            Language::Japanese => "Japanese",
        }
    }
}

// ================================================
// FILE: lib/types/src/jotoba/language/param.rs
// ================================================

use super::Language;
use serde::{Deserialize, Serialize};
use std::ops::Deref;

/// Language parameter that contains a Language and whether English should be used as fallback
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub struct LangParam {
    lang: Language,
    use_en: bool,
}

impl LangParam {
    /// Creates a new LangParam with English fallback disabled
    #[inline]
    pub fn new(lang: Language) -> Self {
        Self::with_en_raw(lang, false)
    }

    /// Creates a new LangParam with English fallback enabled
    #[inline]
    pub fn with_en(lang: Language) -> Self {
        Self::with_en_raw(lang, true)
    }

    /// Creates a new LangParam with English fallback as custom parameter
    #[inline]
    pub fn with_en_raw(lang: Language, use_en: bool) -> Self {
        Self { lang, use_en }
    }

    /// Returns `true` whether English can be used
    #[inline]
    pub fn en_fallback(&self) -> bool {
        self.use_en
    }

    /// Returns `true` if the language is `Language::English`
    #[inline]
    pub fn is_english(&self) -> bool {
        self.lang == Language::English
    }

    /// Returns the params language
    #[inline]
    pub fn language(&self) -> Language {
        self.lang
    }

    /// Returns `true` if the language param matches the given language. This also uses `use_en`
    /// for the comparison
    #[inline]
    pub fn eq_to_lang(&self, lang: &Language) -> bool {
        self.lang == *lang || (self.en_fallback() && *lang == Language::English)
    }
}

impl Deref for LangParam {
    type Target = Language;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.lang
    }
}

// Little shortcut to make trait bounds easier to read
pub trait AsLangParam: Copy {
    fn as_lang(self) -> LangParam;
}

impl<T: Into<LangParam> + Copy> AsLangParam for T {
    #[inline]
    fn as_lang(self) -> LangParam {
        self.into()
    }
}

impl From<&Language> for LangParam {
    #[inline]
    fn from(lang: &Language) -> Self {
        Self::new(*lang)
    }
}

impl From<Language> for LangParam {
    #[inline]
    fn from(lang: Language) -> Self {
        Self::new(lang)
    }
}

impl From<(Language, bool)> for LangParam {
    #[inline]
    fn from(lang: (Language, bool)) -> Self {
        Self::with_en_raw(lang.0, lang.1)
    }
}

// ================================================
// FILE: lib/types/src/jotoba/mod.rs
// ================================================

/// Contains all structures and enums for Jotoba kanji
pub mod kanji;
pub mod language;
/// Contains all structures and enums for Jotoba names
pub mod names;
/// Contains structures used for pagination
pub mod pagination;
/// Contains search related structures and enums
pub mod search;
pub mod sentences;
/// Contains all structures and enums for Jotoba words
pub mod words;
/// Types used in indexes
pub mod indexes;

// ================================================
// FILE: lib/types/src/jotoba/names/mod.rs
// ================================================

pub mod name_type;

use name_type::NameType;
use serde::{Deserialize, Serialize};
use std::hash::{Hash, Hasher};

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Name {
    pub sequence: u32,
    pub kana: String,
    pub kanji: Option<String>,
    pub transcription: String,
    pub name_type: Option<Vec<NameType>>,
    pub xref: Option<String>,
}

impl Name {
    /// Return `true` if name is gendered
    pub fn is_gendered(&self) -> bool {
        self.name_type
            .as_ref()
            .map(|i| i.iter().any(|i| i.is_gender()))
            .unwrap_or(false)
    }
/// Get the gender name-type if exists pub fn get_gender(&self) -> Option { self.name_type .as_ref() .and_then(|i| i.iter().find(|i| i.is_gender()).copied()) } /// Returns `true` if name has at least one non-gender tag pub fn has_non_gender_tags(&self) -> bool { self.name_type .as_ref() .map(|i| i.iter().any(|j| !j.is_gender())) .unwrap_or(false) } #[inline] pub fn get_reading(&self) -> &str { self.kanji.as_ref().unwrap_or(&self.kana) } #[inline] pub fn has_kanji(&self) -> bool { self.kanji.is_some() } } impl PartialEq for Name { #[inline] fn eq(&self, other: &Self) -> bool { self.sequence == other.sequence } } impl Eq for Name {} impl Hash for Name { #[inline] fn hash(&self, state: &mut H) { self.sequence.hash(state); } } ================================================ FILE: lib/types/src/jotoba/names/name_type.rs ================================================ #[cfg(feature = "jotoba_intern")] use localization::traits::Translatable; use serde::{Deserialize, Serialize}; use strum_macros::EnumString; #[derive(Debug, Clone, Copy, EnumString, Serialize, Deserialize, PartialEq, Hash)] #[repr(u8)] pub enum NameType { #[strum(serialize = "company")] Company, #[strum(serialize = "fem")] Female, #[strum(serialize = "masc")] Male, #[strum(serialize = "given")] Given, #[strum(serialize = "organization")] Organization, #[strum(serialize = "person")] Person, #[strum(serialize = "place")] Place, #[strum(serialize = "product")] Product, #[strum(serialize = "station")] RailwayStation, #[strum(serialize = "surname")] Surname, #[strum(serialize = "unclass")] Unclassified, #[strum(serialize = "work")] Work, #[strum(serialize = "char")] Character, #[strum(serialize = "creat")] Creature, #[strum(serialize = "dei")] Deity, #[strum(serialize = "doc")] Document, #[strum(serialize = "ev")] Event, #[strum(serialize = "fict")] Fiction, #[strum(serialize = "group")] Group, #[strum(serialize = "leg")] Legend, #[strum(serialize = "myth")] Mythology, #[strum(serialize = "obj")] Object, 
#[strum(serialize = "oth")] Other, #[strum(serialize = "relig")] Religion, #[strum(serialize = "serv")] Service, #[strum(serialize = "ship")] Ship, } impl NameType { #[inline] pub fn is_gender(&self) -> bool { matches!(self, Self::Female | Self::Male) } } #[cfg(feature = "jotoba_intern")] impl Translatable for NameType { #[inline] fn get_id(&self) -> &'static str { match self { NameType::Company => "Company", NameType::Female => "Female", NameType::Male => "Male", NameType::Given => "Given name", NameType::Organization => "Organization", NameType::Person => "Persons name", NameType::Place => "Place", NameType::Product => "Product", NameType::RailwayStation => "(Railway)Station", NameType::Surname => "Surname", NameType::Unclassified => "Unknown", NameType::Work => "Art work", NameType::Character => "Character", NameType::Creature => "Creature", NameType::Deity => "Deity", NameType::Document => "Document", NameType::Event => "Event", NameType::Fiction => "Fiction", NameType::Group => "Group", NameType::Legend => "Legend", NameType::Mythology => "Mythology", NameType::Object => "Object", NameType::Other => "Other", NameType::Religion => "Religion", NameType::Service => "Service", NameType::Ship => "Ship", } } } ================================================ FILE: lib/types/src/jotoba/pagination/mod.rs ================================================ pub mod page; use page::Page; use serde::Serialize; use std::cmp::min; /// The amount of buttons the paginator should display max. 
const BUTTONS_TO_DISPLAY: u8 = 5;

/// A Pagination structure holding information about page
#[derive(Clone, Copy, Default, Debug)]
pub struct Pagination {
    pub curr_page: u32,
    pub items: u32,
    pub items_per_page: u32,
    pub max_pages: u32,
}

impl Pagination {
    /// Creates a new `Pagination`. `curr_page` is 1-based.
    #[inline]
    pub fn new(curr_page: u32, items: u32, items_per_page: u32, max_pages: u32) -> Self {
        Self {
            curr_page,
            items,
            items_per_page,
            max_pages,
        }
    }

    /// Convenience constructor building a `Page` directly from pagination data.
    #[inline]
    pub fn new_page<T: Serialize>(
        v: T,
        curr_page: u32,
        items: u32,
        items_per_page: u32,
        max_pages: u32,
    ) -> Page<T> {
        Self::new(curr_page, items, items_per_page, max_pages).with_value(v)
    }

    /// Returns the number of the last page
    #[inline]
    pub fn get_last(&self) -> u32 {
        ((self.items as f32 / self.items_per_page as f32).ceil() as u32).min(self.max_pages)
    }

    /// Returns `true` if the current page is the first page
    #[inline]
    pub fn is_first(&self) -> bool {
        self.curr_page == 1
    }

    /// Returns `true` if the current page is the last page
    #[inline]
    pub fn is_last(&self) -> bool {
        self.curr_page == self.get_last()
    }

    /// Wraps `v` into a `Page`, clamping the current page into a valid range.
    pub fn with_value<T: Serialize>(&self, v: T) -> Page<T> {
        // always show at least one page. Otherwise it would panic
        let last = self.get_last().max(1);
        let curr = self.curr_page.min(last);
        Page::with_pages(v, curr, last)
    }

    /// Generates the pagination buttons
    pub fn gen_page_buttons(&self) -> impl Iterator<Item = PaginationButton> + '_ {
        let last = self.get_last();
        let btn_count = min(BUTTONS_TO_DISPLAY as u32, last);
        let h_btns = btn_count / 2;
        // `saturating_sub` guards against an (invalid) `curr_page` beyond the
        // last page, which would otherwise underflow and panic in debug builds.
        let right_btns_inv = h_btns - last.saturating_sub(self.curr_page).min(h_btns);
        let start = self
            .curr_page
            .saturating_sub(h_btns + right_btns_inv)
            // Don't show 0 pages if only one exists
            .max(1);
        let end = min(start + btn_count, last + 1);
        (start..end).map(move |page| PaginationButton::new(page, page == self.curr_page))
    }
}

/// Data for a single frontend pagination button.
#[derive(Copy, Clone)]
pub struct PaginationButton {
    pub page_nr: u32,
    pub active: bool,
}

impl PaginationButton {
    /// Create a new `PaginationButton`
    #[inline]
    fn new(page: u32, active: bool) -> PaginationButton {
        PaginationButton {
            page_nr: page,
            active,
        }
    }
}

================================================
FILE: lib/types/src/jotoba/pagination/page.rs
================================================
use serde::Serialize;

/// A generic API Response type implementing Serialize that can be used for any kind of Response
/// that can be a part of multiple pages
#[derive(Serialize, Clone)]
pub struct Page<T> {
    /// Paginator content
    content: T,
    /// Total amount of Pages
    pages: u32,
    /// Current page
    current_page: u32,
}

impl<T: Serialize> Page<T> {
    /// Creates a new Paginator with default values
    pub fn new(content: T) -> Self {
        Self {
            content,
            pages: 1,
            current_page: 1,
        }
    }

    /// Creates a new Paginator with non default page values
    ///
    /// # Panics
    ///
    /// Panics if `current_page` > `pages`
    pub fn with_pages(content: T, current_page: u32, pages: u32) -> Self {
        assert!(current_page <= pages);
        Self {
            content,
            current_page,
            pages,
        }
    }

    /// Set the paginator's current page.
    ///
    /// # Panics
    ///
    /// Panics if `current_page` > `pages`
    pub fn set_current_page(&mut self, current_page: u32) {
        assert!(current_page <= self.pages);
        self.current_page = current_page;
    }

    /// Set the paginator's pages.
    ///
    /// # Panics
    ///
    /// Panics if `current_page` > `pages`
    pub fn set_pages(&mut self, pages: u32) {
        assert!(self.current_page <= pages);
        self.pages = pages;
    }

    /// Get the paginator's pages.
    pub fn pages(&self) -> u32 {
        self.pages
    }

    /// Returns `true` if the page is blank
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.pages == 0
    }

    /// Get the paginator's current page.
pub fn current_page(&self) -> u32 { self.current_page } } ================================================ FILE: lib/types/src/jotoba/search/guess.rs ================================================ use serde::{Deserialize, Serialize}; /// A guess representing structure. Gives some vague information about the relation to the /// actual value i.e if its likely to be exact, less, etc.. #[derive(Clone, Copy, Debug, Serialize, Deserialize)] pub struct Guess { pub value: u32, pub guess_type: GuessType, } /// Vague guess relation to a guesses actual value #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)] pub enum GuessType { Accurate, MoreThan, LessThan, Undefined, } impl Guess { /// Creates a new `Guess` #[inline] pub fn new(value: u32, guess_type: GuessType) -> Self { Self { value, guess_type } } /// Creates a new guess value with a given limit. If `value` exceeds the with_limit /// `GuessType::MoreThan` will be used. Otherwise GuessType::Accurate pub fn with_limit(value: u32, limit: u32) -> Self { let gt; if value > limit { gt = GuessType::MoreThan; } else { gt = GuessType::Accurate; } Self { value: value.min(limit), guess_type: gt, } } /// Formats the guess to a human readable string pub fn format(&self) -> String { let prefix = self.guess_type.get_prefix(); format!("{}{}", prefix, self.value) } } impl GuessType { #[inline] pub fn get_prefix(&self) -> &'static str { match self { GuessType::Accurate => "", GuessType::Undefined => "", GuessType::MoreThan => ">", GuessType::LessThan => "<", } } } ================================================ FILE: lib/types/src/jotoba/search/help.rs ================================================ use serde::{Deserialize, Serialize}; use crate::jotoba::language::Language; use super::{guess::Guess, SearchTarget}; /// Structure containing information for better search help in case no item was /// found in a search #[derive(Clone, Default, Debug, Serialize, Deserialize)] pub struct SearchHelp { 
#[serde(skip_serializing_if = "Option::is_none")] pub words: Option, #[serde(skip_serializing_if = "Option::is_none")] pub names: Option, #[serde(skip_serializing_if = "Option::is_none")] pub sentences: Option, #[serde(skip_serializing_if = "Option::is_none")] pub kanji: Option, #[serde(skip_serializing_if = "Vec::is_empty")] pub other_langs: Vec, } impl SearchHelp { pub fn new( words: Option, names: Option, sentences: Option, kanji: Option, other_langs: Vec, ) -> Self { Self { words, names, sentences, kanji, other_langs, } } /// Returns `true` if `SearchHelp` is not helpful at all (empty) pub fn is_empty(&self) -> bool { self.iter_items().next().is_none() } /// Returns an iterator over all (QueryType, Guess) pairs that have a value pub fn iter_items(&self) -> impl Iterator { let types = &[ (self.words, SearchTarget::Words), (self.names, SearchTarget::Names), (self.sentences, SearchTarget::Sentences), (self.kanji, SearchTarget::Kanji), ]; types .iter() .filter_map(|i| i.0.is_some().then(|| (i.1, i.0.unwrap()))) .filter(|i| i.1.value != 0) .collect::>() .into_iter() } pub fn iter_langs(&self) -> impl Iterator + '_ { self.other_langs .iter() .map(|lang| (*lang, lang.to_query_format())) } } ================================================ FILE: lib/types/src/jotoba/search/mod.rs ================================================ pub mod guess; pub mod help; pub mod query_type; pub use query_type::SearchTarget; ================================================ FILE: lib/types/src/jotoba/search/query_type.rs ================================================ #[cfg(feature = "jotoba_intern")] use localization::{language::Language, traits::Translatable, TranslationDict}; use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Hash, Default)] pub enum SearchTarget { #[serde(rename = "1")] Kanji, #[serde(rename = "2")] Sentences, #[serde(rename = "3")] Names, #[default] #[serde(rename = "0", other)] Words, } impl SearchTarget { /// 
Iterate over all query types #[inline] pub fn iterate() -> impl Iterator { [Self::Kanji, Self::Sentences, Self::Names, Self::Words].into_iter() } #[cfg(feature = "jotoba_intern")] pub fn get_translated<'a>( &self, dict: &'a TranslationDict, language: Option, ) -> &'a str { dict.gettext(self.get_id(), language) } #[inline] pub fn get_type_id(&self) -> u8 { match self { SearchTarget::Kanji => 1, SearchTarget::Sentences => 2, SearchTarget::Names => 3, SearchTarget::Words => 0, } } } impl TryFrom for SearchTarget { type Error = (); #[inline] fn try_from(value: u8) -> Result { Ok(match value { 0 => Self::Words, 1 => Self::Kanji, 2 => Self::Sentences, 3 => Self::Names, _ => return Err(()), }) } } #[cfg(feature = "jotoba_intern")] impl Translatable for SearchTarget { #[inline] fn get_id(&self) -> &'static str { match self { SearchTarget::Kanji => "Kanji", SearchTarget::Sentences => "Sentences", SearchTarget::Names => "Names", SearchTarget::Words => "Words", } } } ================================================ FILE: lib/types/src/jotoba/sentences/mod.rs ================================================ pub mod tag; pub mod translation; pub use self::tag::Tag; use super::language::{param::AsLangParam, Language}; use bitflags::BitFlag; use jp_utils::furi::Furigana; use serde::{Deserialize, Serialize}; use std::{ hash::{Hash, Hasher}, num::{NonZeroI8, NonZeroU8}, }; use translation::Translation; /// A single Sentence with multiple translations. 
#[derive(Clone, Deserialize, Serialize, Default)] pub struct Sentence { pub id: u32, pub japanese: String, pub furigana: String, pub translations: Vec, pub jlpt_guess: Option, pub level: Option, pub tags: Vec, } impl Sentence { /// Create a new sentence #[inline] pub fn new( id: u32, japanese: String, furigana: String, translations: Vec, tags: Vec, ) -> Self { Sentence { id, japanese, furigana, translations, jlpt_guess: None, level: None, tags, } } /// Returns `true` if the sentence has the given tag #[inline] pub fn has_tag(&self, tag: &Tag) -> bool { self.tags.iter().any(|i| i == tag) } /// Returns `true` if the sentence contains a translation for `language` #[inline] pub fn has_translation(&self, lang: impl AsLangParam) -> bool { let lang = lang.as_lang(); self.translations .iter() .any(|tr| lang.eq_to_lang(&tr.language)) } /// Returns the translation for a given language if exists #[inline] pub fn translation_for(&self, language: Language) -> Option<&str> { self.translations .iter() .find(|i| i.language == language) .map(|i| i.text.as_str()) } pub fn get_translation(&self, lang: impl AsLangParam) -> Option<&str> { let lang = lang.as_lang(); if let Some(s) = self.translation_for(lang.language()) { return Some(s); } if lang.en_fallback() { return self.translation_for(Language::English); } None } pub fn set_jlpt_guess(&mut self, guess: u8) { if !(1..=5).contains(&guess) { return; } self.jlpt_guess = Some(NonZeroU8::new(guess).unwrap()) } /// Calculates a bitmask to efficiently determine the supported languages of a sentence pub fn calc_lang_mask(&self) -> u16 { lang_mask(self.translations.iter().map(|i| i.language)) } #[inline] pub fn level(&self) -> Option { // We add 10 to each value in preprocessing to prevent it reaching 0 which // we want to be able to use NonZeroI8 self.level.map(|i| i.get() - 10) } } #[cfg(feature = "jotoba_intern")] impl Sentence { /// Returns the kana reading of a sentence #[inline] pub fn get_kana(&self) -> String { 
Furigana(&self.furigana).kana_str() } } pub fn lang_mask(langs: I) -> u16 where I: Iterator, { let mut lang_mask = BitFlag::::new(); for lang in langs { let lang: i32 = lang.into(); lang_mask.set_unchecked(lang as u16, true); } lang_mask.raw() } pub fn parse_lang_mask(mask: u16) -> Vec { let mut langs = Vec::new(); for i in 0..10 { if mask & (1 << i) == 0 { continue; } if let Ok(lang) = Language::try_from(i as i32) { langs.push(lang); } } langs } impl Eq for Sentence {} impl PartialEq for Sentence { #[inline] fn eq(&self, other: &Self) -> bool { self.id == other.id } } impl Hash for Sentence { #[inline] fn hash(&self, state: &mut H) { self.id.hash(state); } } impl std::fmt::Debug for Sentence { #[inline] fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.japanese) } } ================================================ FILE: lib/types/src/jotoba/sentences/tag.rs ================================================ use serde::{Deserialize, Serialize}; use strum::IntoEnumIterator; use strum_macros::{AsRefStr, EnumIter, EnumString}; #[derive( Debug, PartialEq, Clone, Copy, AsRefStr, Serialize, Deserialize, EnumString, EnumIter, Hash, Eq, )] #[repr(u8)] pub enum Tag { #[strum(serialize = "casual")] Casual, #[strum(serialize = "formal")] Formal, #[strum(serialize = "humble")] Humble, #[strum(serialize = "kansai", serialize = "kansai dialect")] Kansai, #[strum(serialize = "female", serialize = "female speaker")] Female, #[strum(serialize = "male", serialize = "male speaker")] Male, #[strum(serialize = "proverb")] Proverb, #[strum(serialize = "translatedproverb")] TranslatedProverb, #[strum(serialize = "quote")] Quote, #[strum(serialize = "pun", serialize = "japanese puns")] Pun, #[strum(serialize = "ok")] Ok, #[strum(serialize = "japanglish")] Japanglish, #[strum(serialize = "haiku")] Haiku, #[strum(serialize = "vulgar")] Vulgar, #[strum(serialize = "conversation")] Conversation, #[strum(serialize = "slang")] Slang, #[strum(serialize = 
"meme")] Meme, #[strum(serialize = "bungo")] /// 文語 Bungo, #[strum(serialize = "dialectal")] Dialectal, #[strum(serialize = "poetry")] Poetry, #[strum(serialize = "game")] Game, #[strum(serialize = "manga")] Manga, #[strum(serialize = "lie")] Lie, } impl Tag { #[inline] pub fn iter() -> impl Iterator { ::iter() } #[inline] pub fn as_str(&self) -> &str { self.as_ref() } } ================================================ FILE: lib/types/src/jotoba/sentences/translation.rs ================================================ use crate::jotoba::language::Language; use serde::{Deserialize, Serialize}; /// A Translation for a sentence #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Translation { pub text: String, pub language: Language, } impl From<(String, Language)> for Translation { #[inline] fn from((text, language): (String, Language)) -> Self { Self { text, language } } } ================================================ FILE: lib/types/src/jotoba/words/dialect.rs ================================================ use std::fmt::Display; #[cfg(feature = "jotoba_intern")] use localization::{language::Language, traits::Translatable, TranslationDict}; use serde::{Deserialize, Serialize}; use strum_macros::EnumString; #[derive(Debug, PartialEq, Clone, Copy, EnumString, Serialize, Deserialize, Hash)] #[repr(u8)] pub enum Dialect { #[strum(serialize = "bra")] Brazilian, #[strum(serialize = "hob")] Hokkaido, #[strum(serialize = "ksb")] Kansai, #[strum(serialize = "ktb")] Kantou, #[strum(serialize = "kyb")] Kyoto, #[strum(serialize = "kyu")] Kyuushuu, #[strum(serialize = "nab")] Nagano, #[strum(serialize = "osb")] Osaka, #[strum(serialize = "rkb")] Ryuukyuu, #[strum(serialize = "thb")] Touhoku, #[strum(serialize = "tsb")] Tosa, #[strum(serialize = "tsug")] Tsugaru, } impl Display for Dialect { #[inline] fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self) } } impl Into<&'static str> for Dialect { #[inline] fn into(self) -> 
&'static str { match self { Dialect::Hokkaido => "Hokkaido", Dialect::Brazilian => "Brazilian", Dialect::Kansai => "Kansai", Dialect::Kantou => "Kantou", Dialect::Kyoto => "Kyoto", Dialect::Kyuushuu => "Kyuushuu", Dialect::Nagano => "Nagano", Dialect::Osaka => "Osaka", Dialect::Ryuukyuu => "Ryuukyuu", Dialect::Touhoku => "Touhoku", Dialect::Tosa => "Tosa", Dialect::Tsugaru => "Tsugaru", } } } #[cfg(feature = "jotoba_intern")] impl Translatable for Dialect { #[inline] fn get_id(&self) -> &'static str { (*self).into() } #[inline] fn gettext_custom(&self, dict: &TranslationDict, language: Option) -> String { dict.gettext_fmt("{} dialect", &[self.gettext(dict, language)], language) } } ================================================ FILE: lib/types/src/jotoba/words/dict.rs ================================================ use serde::{Deserialize, Serialize}; use super::{information::Information, priority::Priority}; /// A single dictionary entry representing a words reading #[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, Hash, Eq)] pub struct Dict { pub reading: String, pub kanji: bool, pub no_kanji: bool, pub priorities: Option>, pub reading_info: Option>, pub is_main: bool, } impl Dict { /// Returns the length of the dictionaries reading #[inline] pub fn len(&self) -> usize { // TODO: use proper len calculation here self.reading.chars().count() } /// Returns `true` if the reading has a length of zero #[inline] pub fn is_empty(&self) -> bool { self.reading.is_empty() } } ================================================ FILE: lib/types/src/jotoba/words/field.rs ================================================ #[cfg(feature = "jotoba_intern")] use localization::{language::Language, traits::Translatable, TranslationDict}; use serde::{Deserialize, Serialize}; use strum_macros::{AsRefStr, EnumString}; #[derive(Debug, PartialEq, Clone, Copy, AsRefStr, EnumString, Serialize, Deserialize, Hash)] #[repr(u8)] pub enum Field { #[strum(serialize = "ski")] Ski, 
#[strum(serialize = "psyanal")] Psyanal, #[strum(serialize = "agric")] Agriculture, #[strum(serialize = "anat")] Anatomy, #[strum(serialize = "archeol")] Archeology, #[strum(serialize = "archit")] Architecture, #[strum(serialize = "art")] ArtAesthetics, #[strum(serialize = "astron")] Astronomy, #[strum(serialize = "audvid")] AudioVisual, #[strum(serialize = "aviat")] Aviation, #[strum(serialize = "baseb")] Baseball, #[strum(serialize = "biochem")] Biochemistry, #[strum(serialize = "biol")] Biology, #[strum(serialize = "bot")] Botany, #[strum(serialize = "Buddh")] Buddhism, #[strum(serialize = "bus")] Business, #[strum(serialize = "cards")] Cards, #[strum(serialize = "chem")] Chemistry, #[strum(serialize = "Christn")] Christianity, #[strum(serialize = "comp")] Computing, #[strum(serialize = "cloth")] Clothing, #[strum(serialize = "cryst")] Crystallography, #[strum(serialize = "dent")] Dentistry, #[strum(serialize = "ecol")] Ecology, #[strum(serialize = "econ")] Economics, #[strum(serialize = "elec")] Electricity, #[strum(serialize = "electr")] Electronics, #[strum(serialize = "embryo")] Embryology, #[strum(serialize = "engr")] Engineering, #[strum(serialize = "ent")] Entomology, #[strum(serialize = "finc")] Finance, #[strum(serialize = "film")] Film, #[strum(serialize = "fish")] Fishing, #[strum(serialize = "food")] FoodCooking, #[strum(serialize = "gardn")] Gardening, #[strum(serialize = "genet")] Genetics, #[strum(serialize = "geogr")] Geography, #[strum(serialize = "geol")] Geology, #[strum(serialize = "geom")] Geometry, #[strum(serialize = "go")] GoGame, #[strum(serialize = "golf")] Golf, #[strum(serialize = "gramm")] Grammar, #[strum(serialize = "grmyth")] GreekMythology, #[strum(serialize = "hanaf")] Hanafuda, #[strum(serialize = "horse")] Horseracing, #[strum(serialize = "law")] Law, #[strum(serialize = "kabuki")] Kabuki, #[strum(serialize = "ling")] Linguistics, #[strum(serialize = "logic")] Logic, #[strum(serialize = "MA")] MartialArts, #[strum(serialize = 
"mahj")] Mahjong, #[strum(serialize = "manga")] Manga, #[strum(serialize = "math")] Mathematics, #[strum(serialize = "mech")] MechanicalEngineering, #[strum(serialize = "med")] Medicine, #[strum(serialize = "met")] ClimateWeather, #[strum(serialize = "mining")] Mining, #[strum(serialize = "mil")] Military, #[strum(serialize = "noh")] Noh, #[strum(serialize = "music")] Music, #[strum(serialize = "ornith")] Ornithology, #[strum(serialize = "paleo")] Paleontology, #[strum(serialize = "pathol")] Pathology, #[strum(serialize = "pharm")] Pharmacy, #[strum(serialize = "phil")] Philosophy, #[strum(serialize = "photo")] Photography, #[strum(serialize = "physics")] Physics, #[strum(serialize = "physiol")] Physiology, #[strum(serialize = "politics")] Politics, #[strum(serialize = "print")] Printing, #[strum(serialize = "psych")] Psychology, #[strum(serialize = "psy")] Psychitatry, #[strum(serialize = "Shinto")] Shinto, #[strum(serialize = "rail")] Railway, #[strum(serialize = "rommyth")] RomanMythology, #[strum(serialize = "stockm")] StockMarket, #[strum(serialize = "shogi")] Shogi, #[strum(serialize = "sports")] Sports, #[strum(serialize = "stat")] Statistics, #[strum(serialize = "sumo")] Sumo, #[strum(serialize = "telec")] Telecommunications, #[strum(serialize = "tradem")] Trademark, #[strum(serialize = "tv")] TV, #[strum(serialize = "vidg")] Videogame, #[strum(serialize = "zool")] Zoology, } #[cfg(feature = "jotoba_intern")] impl Translatable for Field { fn get_id(&self) -> &'static str { match self { Field::Agriculture => "Agriculture", Field::Anatomy => "Anatomy", Field::Archeology => "Archeology", Field::Architecture => "Architecture", Field::ArtAesthetics => "Art aesthetics", Field::Astronomy => "Astronomy", Field::AudioVisual => "Audio/visual", Field::Aviation => "Aviation", Field::Baseball => "Baseball", Field::Biochemistry => "Biochemistry", Field::Biology => "Biology", Field::Botany => "Botany", Field::Buddhism => "Buddhism", Field::Business => "Business", 
Field::Cards => "Cards", Field::Chemistry => "Chemistry", Field::Christianity => "Christianity", Field::Computing => "Computing", Field::Crystallography => "Crystallography", Field::Ecology => "Ecology", Field::Economics => "Economics", Field::Electricity => "Electricity", Field::Electronics => "Electronics", Field::Embryology => "Embryology", Field::Engineering => "Engineering", Field::Entomology => "Entomology", Field::Film => "Film", Field::Finance => "Finance", Field::Fishing => "Fishing", Field::FoodCooking => "FoodCooking", Field::Gardening => "Gardening", Field::Genetics => "Genetics", Field::Geography => "Geography", Field::Geology => "Geology", Field::Geometry => "Geometry", Field::GoGame => "Go (game)", Field::Golf => "Golf", Field::Grammar => "Grammar", Field::GreekMythology => "Greek mythology", Field::Hanafuda => "Hanafuda", Field::Horseracing => "Horseracing", Field::Kabuki => "Kabuki", Field::Law => "Law", Field::Linguistics => "Linguistics", Field::Logic => "Logic", Field::MartialArts => "Martial arts", Field::Mahjong => "Mahjong", Field::Mathematics => "Mathematics", Field::MechanicalEngineering => "MechanicalEngineering", Field::Medicine => "Medicine", Field::Mining => "Mining", Field::ClimateWeather => "Climate/weather", Field::Manga => "Manga", Field::Military => "Military", Field::Music => "Music", Field::Ornithology => "Ornithology", Field::Paleontology => "Paleontology", Field::Pathology => "Pathology", Field::Pharmacy => "Pharmacy", Field::Philosophy => "Philosophy", Field::Photography => "Photography", Field::Physics => "Physics", Field::Physiology => "Physiology", Field::Printing => "Printing", Field::Psychology => "Psychology", Field::Psychitatry => "Psychiatry", Field::Railway => "Railway", Field::RomanMythology => "Roman Mythology", Field::StockMarket => "Stock market", Field::Shinto => "Shinto", Field::Shogi => "Shogi", Field::Sports => "Sports", Field::Statistics => "Statistics", Field::Sumo => "Sumo", Field::Telecommunications => 
"Telecommunications", Field::Trademark => "Trademark", Field::TV => "TV", Field::Videogame => "Videogame", Field::Zoology => "Zoology", Field::Clothing => "Clothing", Field::Dentistry => "Dentistry", Field::Politics => "Politics", Field::Noh => "Noh", Field::Psyanal => "Psyanal", Field::Ski => "Ski", } } // Translate to eg "Zoology term" fn gettext_custom(&self, dict: &TranslationDict, language: Option) -> String { dict.gettext_fmt("{} term", &[self.gettext(dict, language)], language) } } ================================================ FILE: lib/types/src/jotoba/words/foreign_language.rs ================================================ #[cfg(feature = "jotoba_intern")] use localization::traits::Translatable; use serde::{Deserialize, Serialize}; use strum_macros::{AsRefStr, EnumString}; #[derive(Debug, PartialEq, Clone, Copy, AsRefStr, EnumString, Serialize, Deserialize, Hash)] #[repr(u8)] pub enum ForeignLanguage { #[strum(serialize = "eng")] English, #[strum(serialize = "geo")] Georgian, #[strum(serialize = "ger")] German, #[strum(serialize = "chi")] Chinese, #[strum(serialize = "may")] Manchu, #[strum(serialize = "kur")] Kurdish, #[strum(serialize = "mnc")] ChinookJargon, #[strum(serialize = "ita")] Italian, #[strum(serialize = "mal")] Malayalam, #[strum(serialize = "tib")] Tibetian, #[strum(serialize = "m")] Mongolian, #[strum(serialize = "ru")] Romanian, #[strum(serialize = "b")] Bantu, #[strum(serialize = "nor")] Norwegian, #[strum(serialize = "gr", serialize = "grc")] Greek, #[strum(serialize = "ice")] Icelandic, #[strum(serialize = "br")] Breton, #[strum(serialize = "mao")] Maori, #[strum(serialize = "lat")] Latin, #[strum(serialize = "amh")] Amharic, #[strum(serialize = "khm")] Khmer, #[strum(serialize = "swa")] Swahili, #[strum(serialize = "heb")] Hebrew, #[strum(serialize = "glg")] Galician, #[strum(serialize = "kor")] Korean, #[strum(serialize = "tam")] Tamil, #[strum(serialize = "vie")] Viatnamese, #[strum(serialize = "pol")] Polish, #[strum(serialize 
= "san")] Sanskrit, #[strum(serialize = "per")] Persian, #[strum(serialize = "fil")] Filipino, #[strum(serialize = "mol")] Moldavian, #[strum(serialize = "scr")] Croatian, #[strum(serialize = "tha")] Thai, #[strum(serialize = "bur")] Burmese, #[strum(serialize = "slo")] Slovak, #[strum(serialize = "cze")] Czech, #[strum(serialize = "hin")] Hindi, #[strum(serialize = "arn")] Mapudungun, #[strum(serialize = "tur")] Turkish, #[strum(serialize = "haw")] Hawaiian, #[strum(serialize = "afr")] Afrikaans, #[strum(serialize = "epo")] Esperanto, #[strum(serialize = "yid")] Yiddish, #[strum(serialize = "som")] Somali, #[strum(serialize = "tah")] Tahitian, #[strum(serialize = "urd")] Urdu, #[strum(serialize = "ind")] Indonesian, #[strum(serialize = "est")] Estonian, #[strum(serialize = "bul")] Bulgarian, #[strum(serialize = "ara")] Arabic, #[strum(serialize = "dan")] Danish, #[strum(serialize = "por")] Portuguese, #[strum(serialize = "fin")] Finnish, #[strum(serialize = "ain")] Ainu, #[strum(serialize = "alg")] Algonquian, #[strum(serialize = "fre")] French, } #[cfg(feature = "jotoba_intern")] impl Translatable for ForeignLanguage { fn get_id(&self) -> &'static str { match self { ForeignLanguage::English => "English", ForeignLanguage::Georgian => "Georgian", ForeignLanguage::German => "German", ForeignLanguage::Chinese => "Chinese", ForeignLanguage::Manchu => "Manchu", ForeignLanguage::Kurdish => "Kurdish", ForeignLanguage::ChinookJargon => "ChinookJargon", ForeignLanguage::Italian => "Italian", ForeignLanguage::Malayalam => "Malayalam", ForeignLanguage::Tibetian => "Tibetian", ForeignLanguage::Mongolian => "Mongolian", ForeignLanguage::Romanian => "Romanian", ForeignLanguage::Bantu => "Bantu", ForeignLanguage::Norwegian => "Norwegian", ForeignLanguage::Greek => "Greek", ForeignLanguage::Icelandic => "Icelandic", ForeignLanguage::Breton => "Breton", ForeignLanguage::Maori => "Maori", ForeignLanguage::Latin => "Latin", ForeignLanguage::Amharic => "Amharic", 
ForeignLanguage::Khmer => "Khmer", ForeignLanguage::Swahili => "Swahili ", ForeignLanguage::Hebrew => "Hebrew", ForeignLanguage::Galician => "Galician", ForeignLanguage::Korean => "Korean", ForeignLanguage::Tamil => "Tamil", ForeignLanguage::Viatnamese => "Viatnamese", ForeignLanguage::Polish => "Polish", ForeignLanguage::Sanskrit => "Sanskrit", ForeignLanguage::Persian => "Persian", ForeignLanguage::Filipino => "Filipino", ForeignLanguage::Moldavian => "Moldavian", ForeignLanguage::Croatian => "Croatian", ForeignLanguage::Thai => "Thai", ForeignLanguage::Burmese => "Burmese", ForeignLanguage::Slovak => "Slovak", ForeignLanguage::Czech => "Czech", ForeignLanguage::Hindi => "Hindi", ForeignLanguage::Mapudungun => "Mapudungun", ForeignLanguage::Turkish => "Turkish", ForeignLanguage::Hawaiian => "Hawaiian", ForeignLanguage::Afrikaans => "Afrikaans", ForeignLanguage::Esperanto => "Esperanto", ForeignLanguage::Yiddish => "Yiddish", ForeignLanguage::Somali => "Somali", ForeignLanguage::Tahitian => "Tahitian", ForeignLanguage::Urdu => "Urdu", ForeignLanguage::Indonesian => "Indonesian", ForeignLanguage::Estonian => "Estonian", ForeignLanguage::Bulgarian => "Bulgarian", ForeignLanguage::Arabic => "Arabic", ForeignLanguage::Danish => "Danish", ForeignLanguage::Portuguese => "Portuguese", ForeignLanguage::Finnish => "Finnish", ForeignLanguage::Ainu => "Ainu", ForeignLanguage::Algonquian => "Algonquian", ForeignLanguage::French => "French", } } } impl Default for ForeignLanguage { #[inline] fn default() -> Self { Self::English } } ================================================ FILE: lib/types/src/jotoba/words/gtype.rs ================================================ use serde::{Deserialize, Serialize}; use std::convert::TryFrom; use strum_macros::{AsRefStr, EnumString}; #[derive(Debug, PartialEq, Clone, Copy, AsRefStr, EnumString, Serialize, Deserialize, Hash)] #[repr(u8)] pub enum GType { #[strum(serialize = "lit")] Literal, #[strum(serialize = "fig")] Figurative, 
#[strum(serialize = "expl")]
    Explanation,
}

impl TryFrom<i32> for GType {
    type Error = ();

    /// Maps the numeric representation back to a `GType`.
    /// Must stay the inverse of `Into<i32>` below.
    #[inline]
    fn try_from(i: i32) -> Result<Self, Self::Error> {
        Ok(match i {
            0 => Self::Literal,
            1 => Self::Figurative,
            2 => Self::Explanation,
            _ => return Err(()),
        })
    }
}

impl Into<i32> for GType {
    #[inline]
    fn into(self) -> i32 {
        match self {
            Self::Literal => 0,
            Self::Figurative => 1,
            Self::Explanation => 2,
        }
    }
}

================================================ FILE: lib/types/src/jotoba/words/inflection.rs ================================================

use serde::{Deserialize, Serialize};

#[cfg(feature = "jotoba_intern")]
use jp_inflections::{Verb, VerbType, WordForm};

/// A single Inflection
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize)]
pub enum Inflection {
    Negative,
    Polite,
    Present,
    Past,
    TeForm,
    Potential,
    Passive,
    Causative,
    CausativePassive,
    PotentialOrPassive,
    Imperative,
    Tai,
    TeIru,
    TeAru,
    TeMiru,
    TeShimau,
    Chau,
    TeOku,
    Toku,
    Tara,
    Tari,
    Ba,
}

#[cfg(feature = "jotoba_intern")]
impl localization::traits::Translatable for Inflection {
    /// Returns the gettext message id for the inflection name.
    fn get_id(&self) -> &'static str {
        match self {
            Inflection::Negative => "Negative",
            Inflection::Polite => "Polite",
            Inflection::Present => "Present",
            Inflection::Past => "Past",
            Inflection::TeForm => "TeForm",
            Inflection::Potential => "Potential",
            Inflection::Passive => "Passive",
            Inflection::Causative => "Causative",
            Inflection::CausativePassive => "CausativePassive",
            Inflection::PotentialOrPassive => "PotentialOrPassive",
            Inflection::Imperative => "Imperative",
            Inflection::Tai => "Tai",
            Inflection::TeIru => "TeIru",
            Inflection::TeAru => "TeAru",
            Inflection::TeMiru => "TeMiru",
            Inflection::TeShimau => "TeShimau",
            Inflection::TeOku => "TeOku",
            Inflection::Chau => "Chau",
            Inflection::Toku => "Toku",
            Inflection::Tara => "Tara",
            Inflection::Tari => "Tari",
            Inflection::Ba => "Ba",
        }
    }

    /// Looks the id up in the "inflection" gettext context.
    fn gettext<'a>(
        &self,
        dict: &'a localization::TranslationDict,
        language: Option<localization::language::Language>,
    ) -> &'a str {
        self.pgettext(dict, "inflection", language)
    }
}

/// A set of different inflections which will be displayed for verbs
#[derive(Serialize, Deserialize)]
pub struct Inflections {
    pub present: InflectionPair,
    pub present_polite: InflectionPair,
    pub past: InflectionPair,
    pub past_polite: InflectionPair,
    pub te_form: InflectionPair,
    pub potential: InflectionPair,
    pub passive: InflectionPair,
    pub causative: InflectionPair,
    pub causative_passive: InflectionPair,
    pub imperative: InflectionPair,
}

/// Positive/negative variant of one inflected form.
#[derive(Serialize, Deserialize)]
pub struct InflectionPair {
    #[serde(rename = "p")]
    pub positive: String,
    #[serde(rename = "n")]
    pub negative: String,
}

/// Builds the full inflection table for `verb`.
///
/// `is_exception` forces the kana reading for the irregular verbs (為る/来る)
/// whose kanji form would otherwise inflect incorrectly.
// NOTE(review): the error type was restored as the jp_inflections crate's
// error — confirm against the crate's API.
pub fn build_inflections(
    verb: &Verb,
    is_exception: bool,
) -> Result<Inflections, jp_inflections::error::Error> {
    return Ok(Inflections {
        present: InflectionPair {
            positive: verb.dictionary(WordForm::Short)?.try_kana(is_exception),
            negative: verb.negative(WordForm::Short)?.try_kana(is_exception),
        },
        present_polite: InflectionPair {
            positive: verb.dictionary(WordForm::Long)?.try_kana(is_exception),
            negative: verb.negative(WordForm::Long)?.try_kana(is_exception),
        },
        past: InflectionPair {
            positive: verb.past(WordForm::Short)?.try_kana(is_exception),
            negative: verb.negative_past(WordForm::Short)?.try_kana(is_exception),
        },
        past_polite: InflectionPair {
            positive: verb.past(WordForm::Long)?.try_kana(is_exception),
            negative: verb.negative_past(WordForm::Long)?.try_kana(is_exception),
        },
        te_form: InflectionPair {
            positive: verb.te_form()?.try_kana(is_exception),
            negative: verb.negative_te_form()?.try_kana(is_exception),
        },
        potential: InflectionPair {
            positive: verb.potential(WordForm::Short)?.try_kana(is_exception),
            negative: verb
                .negative_potential(WordForm::Short)?
                .try_kana(is_exception),
        },
        passive: InflectionPair {
            positive: verb.passive()?.try_kana(is_exception),
            negative: verb.negative_passive()?.try_kana(is_exception),
        },
        causative: InflectionPair {
            positive: verb.causative()?.try_kana(is_exception),
            negative: verb.negative_causative()?.try_kana(is_exception),
        },
        causative_passive: InflectionPair {
            positive: verb.causative_passive()?.try_kana(is_exception),
            negative: verb.negative_causative_passive()?.try_kana(is_exception),
        },
        imperative: InflectionPair {
            positive: verb.imperative()?.try_kana(is_exception),
            negative: verb.imperative_negative()?.try_kana(is_exception),
        },
    });
}

/// Returns the inflections of `word` if its a verb
#[cfg(feature = "jotoba_intern")]
pub fn of_word(word: &super::Word) -> Option<Inflections> {
    let verb = get_jp_verb(word)?;
    // 為る/来る need the kana reading for correct inflection
    let is_exception = word
        .reading
        .kanji
        .as_ref()
        .map(|kanji| kanji.reading == "為る" || kanji.reading == "来る")
        .unwrap_or(false);
    build_inflections(&verb, is_exception).ok()
}

/// Returns a jp_inflections::Verb if [`self`] is a verb
#[cfg(feature = "jotoba_intern")]
pub fn get_jp_verb(word: &super::Word) -> Option<Verb> {
    use super::part_of_speech::PartOfSpeech;
    use crate::jotoba::words::part_of_speech::{self, IrregularVerb};

    // Suru/kuru verbs need the dedicated exception conjugation tables
    let is_exception = word.get_pos().any(|i| match i {
        PartOfSpeech::Verb(v) => match v {
            part_of_speech::VerbType::Irregular(i) => match i {
                IrregularVerb::Suru => true,
                _ => false,
            },
            part_of_speech::VerbType::Kuru => true,
            _ => false,
        },
        _ => false,
    });

    let verb_type = if word.get_pos().any(|i| i.is_ichidan()) {
        VerbType::Ichidan
    } else if word.get_pos().any(|i| i.is_godan()) {
        VerbType::Godan
    } else if is_exception {
        VerbType::Exception
    } else {
        return None;
    };

    let verb = Verb::new(
        jp_inflections::Word::new(
            &word.reading.kana.reading,
            word.reading.kanji.as_ref().map(|i| &i.reading),
        ),
        verb_type,
    );

    // Check if [`verb`] really is a valid verb in dictionary form
    verb.word.is_verb().then(|| verb)
}

================================================ FILE:
lib/types/src/jotoba/words/information.rs ================================================

#[cfg(feature = "jotoba_intern")]
use localization::traits::Translatable;
use strum_macros::{AsRefStr, EnumString};

use serde::{Deserialize, Serialize};

/// Extra per-reading annotations from JMdict (irregular/outdated/search-only forms etc.).
#[derive(Debug, PartialEq, Clone, Copy, AsRefStr, EnumString, Serialize, Deserialize, Hash, Eq)]
#[repr(u8)]
pub enum Information {
    #[strum(serialize = "ateji")]
    Ateji,
    #[strum(serialize = "ik")]
    IrregularKana,
    #[strum(serialize = "iK")]
    IrregularKanji,
    #[strum(serialize = "io")]
    IrregularOkurigana,
    #[strum(serialize = "oK")]
    OutdatedKanji,
    #[strum(serialize = "ok")]
    OutdatedKana,
    #[strum(serialize = "gikun")]
    Gikun,
    #[strum(serialize = "uK")]
    UsuallyKana,
    #[strum(serialize = "rK")]
    RarelyUsedKanjiForm,
    #[strum(serialize = "sK")]
    SearchOnlyKanji,
    #[strum(serialize = "sk")]
    SearchOnlyKana,
}

#[cfg(feature = "jotoba_intern")]
impl Translatable for Information {
    /// Returns the gettext message id describing the annotation.
    fn get_id(&self) -> &'static str {
        match self {
            Information::Ateji => "ateji",
            Information::IrregularKana => "irregular kana",
            Information::IrregularKanji => "irregular kanji",
            Information::IrregularOkurigana => "irregular okurigana",
            Information::OutdatedKanji => "outdated kanji",
            Information::OutdatedKana => "outdated kana",
            Information::Gikun => "gikun",
            Information::UsuallyKana => "usually written in kana",
            Information::RarelyUsedKanjiForm => "rarely used kanji form",
            // NOTE(review): "Seach" is a typo for "Search", but these strings are
            // gettext msgids — fixing them requires updating the .po catalogs too.
            Information::SearchOnlyKanji => "Seach only kanji form",
            Information::SearchOnlyKana => "Seach only kana form",
        }
    }
}

================================================ FILE: lib/types/src/jotoba/words/misc.rs ================================================

#[cfg(feature = "jotoba_intern")]
use localization::traits::Translatable;
use strum::IntoEnumIterator;
use strum_macros::{AsRefStr, EnumIter, EnumString};

use serde::{Deserialize, Serialize};

/// Miscellaneous JMdict sense tags (register, field-of-use, name types, ...).
#[derive(
    Debug, PartialEq, Clone, Copy, AsRefStr, EnumString, Serialize, Deserialize, Hash, EnumIter,
)]
#[repr(u8)]
pub enum Misc {
    #[strum(serialize = "ship")]
    Ship,
    #[strum(serialize = "abbr", serialize = "abbreviation")]
    Abbreviation,
    #[strum(serialize = "arch", serialize = "archaism")]
    Archaism,
    #[strum(serialize = "char")]
    Character,
    #[strum(serialize = "chn", serialize = "childrenslanguage")]
    ChildrensLanguage,
    #[strum(serialize = "col", serialize = "colloquialism")]
    Colloquialism,
    #[strum(serialize = "company")]
    CompanyName,
    #[strum(serialize = "creat")]
    Creature,
    #[strum(serialize = "dated")]
    DatedTerm,
    #[strum(serialize = "dei")]
    Deity,
    #[strum(serialize = "derog", serialize = "derogatory")]
    Derogatory,
    #[strum(serialize = "doc")]
    Document,
    #[strum(serialize = "ev")]
    Event,
    #[strum(serialize = "euph")]
    Euphemistic,
    #[strum(serialize = "fam", serialize = "familiarlanguage")]
    FamiliarLanguage,
    #[strum(serialize = "fem", serialize = "femaleterm")]
    FemaleTermOrLanguage,
    #[strum(serialize = "fict", serialize = "fiction")]
    Fiction,
    #[strum(serialize = "given")]
    GivenName,
    #[strum(serialize = "group")]
    Group,
    #[strum(serialize = "hist", serialize = "Historical")]
    HistoricalTerm,
    #[strum(serialize = "hon", serialize = "honorific")]
    HonorificLanguage,
    #[strum(serialize = "hum", serialize = "humblelanguage")]
    HumbleLanguage,
    #[strum(serialize = "id", serialize = "idomatic")]
    IdiomaticExpression,
    #[strum(serialize = "joc")]
    JocularHumorousTerm,
    #[strum(serialize = "leg", serialize = "legend")]
    Legend,
    #[strum(serialize = "form", serialize = "formal")]
    LiteraryOrFormalTerm,
    #[strum(serialize = "m-sl", serialize = "mangaslang")]
    MangaSlang,
    #[strum(serialize = "male", serialize = "maleterm")]
    MaleTermOrLanguage,
    #[strum(serialize = "myth")]
    Mythology,
    #[strum(serialize = "net-sl", serialize = "internetslang")]
    InternetSlang,
    #[strum(serialize = "obj", serialize = "object")]
    Object,
    #[strum(serialize = "obs", serialize = "obsolete")]
    ObsoleteTerm,
    #[strum(serialize = "obsc", serialize = "obscure")]
    ObscureTerm,
    #[strum(serialize = "on-mim", serialize = "onomatopoeic")]
    OnomatopoeicOrMimeticWord,
    #[strum(serialize = "organization")]
    OrganizationName,
    #[strum(serialize = "oth", serialize = "other")]
    Other,
    #[strum(serialize = "person", serialize = "personname")]
    Personname,
    #[strum(serialize = "place", serialize = "placename")]
    PlaceName,
    #[strum(serialize = "poet", serialize = "poeticalterm")]
    PoeticalTerm,
    #[strum(serialize = "pol", serialize = "politelanguage")]
    PoliteLanguage,
    #[strum(serialize = "product", serialize = "productname")]
    ProductName,
    #[strum(serialize = "proverb")]
    Proverb,
    #[strum(serialize = "quote", serialize = "quotation")]
    Quotation,
    #[strum(serialize = "rare")]
    Rare,
    #[strum(serialize = "relig")]
    Religion,
    #[strum(serialize = "sens", serialize = "sensitive")]
    Sensitive,
    #[strum(serialize = "serv")]
    Service,
    #[strum(serialize = "sl", serialize = "slang")]
    Slang,
    #[strum(serialize = "station")]
    RailwayStation,
    #[strum(serialize = "surname")]
    FamilyOrSurname,
    #[strum(serialize = "uk", serialize = "usuallykana")]
    UsuallyWrittenInKana,
    #[strum(serialize = "unclass")]
    UnclassifiedName,
    #[strum(serialize = "vulg", serialize = "vulgar")]
    VulgarExpressionOrWord,
    #[strum(serialize = "work", serialize = "artwork")]
    ArtWork,
    #[strum(serialize = "X", serialize = "rude")]
    RudeOrXRatedTerm,
    #[strum(serialize = "yoji", serialize = "yojijukugo")]
    Yojijukugo,
}

#[cfg(feature = "jotoba_intern")]
impl Translatable for Misc {
    /// Returns the gettext message id for the tag's display name.
    fn get_id(&self) -> &'static str {
        match self {
            Misc::Abbreviation => "Abbreviation",
            Misc::Archaism => "Archaism",
            Misc::Character => "Character",
            Misc::ChildrensLanguage => "Childrens language",
            Misc::Colloquialism => "Colloquialism",
            Misc::CompanyName => "Company name",
            Misc::Creature => "Creature",
            Misc::DatedTerm => "Dated term",
            Misc::Deity => "Deity",
            Misc::Derogatory => "Derogatory",
            Misc::Document => "Document",
            Misc::Event => "Event",
            Misc::Euphemistic => "Euphemistic",
            Misc::FamiliarLanguage => "Familiar language",
            Misc::FemaleTermOrLanguage => "Female term/language",
            Misc::Fiction => "Fiction",
            Misc::GivenName => "Given name",
            Misc::Group => "Group",
            Misc::HistoricalTerm => "Historical term",
            Misc::HonorificLanguage => "Honorific language",
            Misc::HumbleLanguage => "Humble language",
            Misc::IdiomaticExpression => "Idiomatic expression",
            Misc::JocularHumorousTerm => "Jocular humorous term",
            Misc::Legend => "Legend",
            Misc::LiteraryOrFormalTerm => "Literary/formal term",
            Misc::MangaSlang => "Manga slang",
            Misc::MaleTermOrLanguage => "Male term/language",
            Misc::Mythology => "Mythology",
            Misc::InternetSlang => "Internet slang",
            Misc::Object => "Object",
            Misc::ObsoleteTerm => "Obsolete term",
            Misc::ObscureTerm => "Obscure term",
            Misc::OnomatopoeicOrMimeticWord => "Onomatopoetic or mimetic word",
            Misc::OrganizationName => "Organization name",
            Misc::Other => "Other",
            Misc::Personname => "Person name",
            Misc::PlaceName => "Place name",
            Misc::PoeticalTerm => "Poetical term",
            Misc::PoliteLanguage => "Polite language",
            Misc::ProductName => "Product name",
            Misc::Proverb => "Proverb",
            // NOTE(review): "Qutation" is a typo for "Quotation", but this string is
            // a gettext msgid — fixing it requires updating the .po catalogs too.
            Misc::Quotation => "Qutation",
            Misc::Rare => "Rare",
            Misc::Religion => "Religion",
            Misc::Sensitive => "Sensitive",
            Misc::Service => "Service",
            Misc::Slang => "Slang",
            Misc::RailwayStation => "Railway station",
            Misc::FamilyOrSurname => "Family or surname",
            Misc::UsuallyWrittenInKana => "Usually written in kana",
            Misc::UnclassifiedName => "Unclassified name",
            Misc::Ship => "Ship",
            Misc::VulgarExpressionOrWord => "Vulgar expression/word",
            Misc::ArtWork => "Artwork",
            Misc::RudeOrXRatedTerm => "Rude/x-rated term",
            Misc::Yojijukugo => "Yojijukugo",
        }
    }
}

impl Misc {
    /// Iterates over all `Misc` variants.
    #[inline]
    pub fn iter() -> impl Iterator<Item = Misc> {
        <Misc as IntoEnumIterator>::iter()
    }

    /// Returns the tag's canonical (first) strum string.
    #[inline]
    pub fn as_str(&self) -> &str {
        self.as_ref()
    }
}

================================================ FILE: lib/types/src/jotoba/words/mod.rs ================================================

pub mod dialect;
pub mod dict;
pub mod field;
pub mod foreign_language;
pub mod gtype;
pub mod inflection;
pub mod information;
pub mod misc;
pub mod part_of_speech;
pub mod pitch;
pub mod priority;
pub mod reading;
pub mod sense;

pub
use dict::Dict; use super::language::{param::AsLangParam, Language}; use bitflags::BitFlag; use itertools::Itertools; use jp_utils::{ furi::{parse::FuriParser, segment::SegmentRef}, JapaneseExt, }; use misc::Misc; use part_of_speech::{PartOfSpeech, PosSimple}; use pitch::{raw_data::PitchValues, Pitch}; use reading::{Reading, ReadingIter}; use sense::{Sense, SenseGlossIter}; use serde::{Deserialize, Serialize}; use std::{ hash::{Hash, Hasher}, num::{NonZeroU32, NonZeroU8}, path::Path, }; /// A single word in Jotobas word search #[derive(Clone, Default, Serialize, Deserialize, Eq)] pub struct Word { pub sequence: u32, pub common: bool, pub reading: Reading, pub senses: Vec, pub furigana: Option, pub jlpt_lvl: Option, pub collocations: Option>, pub transive_version: Option, pub intransive_version: Option, pub sentences_available: u16, pub accents: PitchValues, } impl Word { /// Returns true if a word is common #[inline] pub fn is_common(&self) -> bool { self.common } /// Returns the jlpt level of a word. `None` if a word doesn't have a JLPT lvl assigned #[inline] pub fn get_jlpt_lvl(&self) -> Option { self.jlpt_lvl.map(|i| i.get()) } /// Returns the main reading of a word. This is the kanji reading if a kanji reading /// exists. Otherwise its the kana reading #[inline] pub fn get_reading(&self) -> &Dict { self.reading.get_reading() } /// Returns the main reading of a word as str. This is the kanji reading if a kanji reading /// exists. 
Otherwise its the kana reading #[inline] pub fn get_reading_str(&self) -> &str { &self.get_reading().reading } /// Returns an iterator over all sense and its glosses #[inline] pub fn sense_gloss_iter(&self) -> SenseGlossIter { SenseGlossIter::new(&self) } /// Return all senses of a language #[inline] pub fn senses_by_lang(&self, language: impl AsLangParam) -> Vec<&Sense> { let language = language.as_lang(); self.senses .iter() .filter(|i| language.eq_to_lang(&i.language)) .collect() } /// Get senses ordered by language (non-english first) pub fn get_senses_orderd(&self, english_on_top: bool, _language: Language) -> Vec> { let (english, other): (Vec, Vec) = self .senses .clone() .into_iter() .partition(|i| i.language == Language::English); if english_on_top { vec![english, other] } else { vec![other, english] } } /// Get senses ordered by language (non-english first) pub fn get_senses_with_en(&self) -> Vec> { let (english, other): (Vec, Vec) = self .senses .clone() .into_iter() .partition(|i| i.language == Language::English); vec![other, english] } /// Returns all senses of the word #[inline] pub fn senses(&self) -> &[Sense] { &self.senses } #[inline] pub fn sense_by_id(&self, id: u8) -> Option<&Sense> { self.senses.get(id as usize) } pub fn get_sense_gloss(&self, id: u16) -> Option<(&Sense, &sense::Gloss)> { let (sense_id, gloss_id) = sense::from_unique_id(id); let sense = self.sense_by_id(sense_id)?; let gloss = sense.gloss_by_id(gloss_id)?; Some((sense, gloss)) } /// Returns an Iterator over the words glosses using a given language pub fn gloss_iter_by_lang(&self, lang_param: impl AsLangParam) -> impl Iterator { let lang_param = lang_param.as_lang(); self.sense_gloss_iter() .filter(move |i| lang_param.eq_to_lang(&i.0.language)) .map(|i| i.1.gloss.as_str()) } /// Get amount of tags which will be displayed below the reading pub fn get_word_tag_count(&self) -> u8 { [self.is_common(), self.get_jlpt_lvl().is_some()] .iter() .filter(|b| **b) .count() as u8 } /// 
Returns `true` if the word has at least one sentence in the given language pub fn has_sentence(&self, lang: impl AsLangParam) -> bool { let lang_p = lang.as_lang(); let lang: i32 = lang_p.language().into(); BitFlag::::from(self.sentences_available).get(lang as u16) || (lang_p.en_fallback() && !lang_p.is_english() && BitFlag::::from(self.sentences_available).get(Language::English as u16)) } /// Returns true if word has a misc information matching `misc`. This requires english glosses /// to be available since they're the only one holding misc information #[inline] pub fn has_misc(&self, misc: &Misc) -> bool { self.senses .iter() .filter_map(|i| i.misc) .any(|i| i == *misc) } /// Returns `true` if word has at least one of the provided part of speech pub fn has_pos(&self, pos_filter: &[PosSimple]) -> bool { for sense in self.senses.iter().map(|i| i.get_pos_simple()) { if sense.iter().any(|i| pos_filter.contains(i)) { return true; } } false } /// Returns `true` if word has all of the provided part of speech #[inline] pub fn has_all_pos(&self, pos_filter: &[PosSimple]) -> bool { self.has_all_pos_iter(pos_filter.iter()) } /// Returns `true` if word has all of the provided part of speech #[inline] pub fn has_all_pos_iter<'a, I>(&self, mut pos_filter: I) -> bool where I: Iterator + 'a, { pos_filter.all(|pos| self.senses.iter().any(|s| s.has_pos_simple(pos))) } /// Returns `true` if a word has at least one translation for the provided language, or english /// if `allow_english` is `true` #[inline] pub fn has_language(&self, language: impl AsLangParam) -> bool { let lang = language.as_lang(); self.senses.iter().any(|i| lang.eq_to_lang(&i.language)) } /// Returns `true` if a word has collocations #[inline] pub fn has_collocations(&self) -> bool { self.collocations.is_some() } /// Returns an iterator over all reading elements #[inline] pub fn reading_iter(&self, allow_kana: bool) -> ReadingIter<'_> { self.reading.iter(allow_kana) } /// Returns true if word has `reading` 
#[inline] pub fn has_reading(&self, reading: &str) -> bool { self.reading_iter(true).any(|j| j.reading == reading) } /// Returns `true` if the word has a kanji reading #[inline] pub fn has_kanji(&self) -> bool { self.get_reading_str().has_kanji() } /// Returns `true` if `word` has `reading` as main (main kanji or kana reading) pub fn has_main_reading(&self, reading: &str) -> bool { self.reading.kana.reading == reading || self .reading .kanji .as_ref() .map(|i| i.reading == reading) .unwrap_or(false) } /// Returns an iterator over all parts of speech of a word #[inline] pub fn get_pos(&self) -> impl Iterator { self.senses .iter() .map(|i| i.part_of_speech.iter()) .flatten() } #[inline] pub fn get_kana(&self) -> &str { &self.reading.kana.reading } #[inline] pub fn has_pitch(&self) -> bool { !self.accents.is_empty() } /// Returns a renderable vec of accents with kana characters pub fn get_pitches(&self) -> Vec { self.accents .iter() .filter_map(|drop| Pitch::new(self.get_kana(), drop)) .collect() } /// Returns a renderable vec of accents with kana characters #[inline] pub fn get_first_pitch(&self) -> Option { let drop = self.accents.get(0)?; Pitch::new(self.get_kana(), drop) } /// Return `true` if the word is a katakana word #[inline] pub fn is_katakana_word(&self) -> bool { self.reading.is_katakana() } /// Removes all languages except the one specified and potentionally english when enabled #[inline] pub fn adjust_language(&mut self, lang: impl AsLangParam) { let lang = lang.as_lang(); self.senses.retain(|j| lang.eq_to_lang(&j.language)); } /// Returns furigana reading-pairs of an Item #[inline] pub fn get_furigana(&self) -> Option> { let furi = self.furigana.as_ref()?; FuriParser::new(furi) .collect::, _>>() .ok() } } // Jotoba intern only features #[cfg(feature = "jotoba_intern")] impl Word { /// Get the audio's filename of the word #[inline] pub fn audio_file_name(&self) -> Option { self.reading .kanji .as_ref() .map(|kanji| format!("{}【{}】.mp3", kanji.reading, 
self.reading.kana.reading)) } /// Get the audio's filename of the word #[inline] pub fn audio_file_name_old(&self) -> Option { self.reading.kanji.as_ref().and_then(|kanji| { /* let frame_path = format!("svg/kanji/{}_frames.svg", self.literal); let frame_path = Path::new(&frame_path); assets_path.as_ref().join(frame_path) */ let file = format!("{}【{}】.mp3", kanji.reading, self.reading.kana.reading); std::path::Path::new(&format!("html/audio/mp3/{}", file)) .exists() .then(|| file) }) } /// Get the audio path of a word #[inline] pub fn audio_file>(&self, _assets_path: P) -> Option { self.reading.kanji.as_ref().and_then(|kanji| { let file = format!("mp3/{}【{}】.mp3", kanji.reading, self.reading.kana.reading); std::path::Path::new(&format!("html/audio/{}", file)) .exists() .then(|| file) }) } /// Get alternative readings in a beautified, print-ready format #[inline] pub fn alt_readings_beautified(&self) -> String { self.reading .alternative .iter() .map(|i| i.reading.clone()) .join(", ") } pub fn glosses_pretty(&self) -> String { let senses = self.get_senses_with_en(); // Try to use glosses with users language if !senses[0].is_empty() { Self::pretty_print_senses(&senses[0]) } else { // Fallback use english gloses Self::pretty_print_senses(&senses[1]) } } fn pretty_print_senses(senses: &[Sense]) -> String { senses .iter() .map(|i| i.glosses.clone()) .flatten() .into_iter() .map(|i| i.gloss) .join(", ") } /// Returns an [`Inflections`] value if [`self`] is a valid verb #[inline] pub fn get_inflections(&self) -> Option { inflection::of_word(self) } } /// Removes all senses which ain't in the provided language or english in case `show_english` is /// `true` #[cfg(feature = "jotoba_intern")] pub fn filter_languages<'a, I: 'a + Iterator>( iter: I, lang: impl AsLangParam, ) { for word in iter { word.adjust_language(lang); } } impl Hash for Word { #[inline] fn hash(&self, state: &mut H) { self.sequence.hash(state); } } impl PartialEq for Word { #[inline] fn eq(&self, other: 
&Self) -> bool {
        self.sequence == other.sequence
    }
}

impl std::fmt::Debug for Word {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let senses = self
            .senses_by_lang(Language::English)
            .into_iter()
            .map(|i| i.glosses.iter().map(|i| &i.gloss).join("|"))
            .join("\n");
        f.debug_struct("Word")
            .field("Seq", &self.sequence)
            .field("Kana", &self.reading.kana.reading)
            .field("Reading", &self.get_reading().reading)
            .field("Common", &self.is_common())
            .field("JLPT", &self.jlpt_lvl)
            .field("Translations", &senses)
            .finish()
    }
}

================================================ FILE: lib/types/src/jotoba/words/part_of_speech.rs ================================================

#![allow(clippy::from_over_into)]

use std::convert::TryFrom;

#[cfg(feature = "jotoba_intern")]
use localization::{language::Language, traits::Translatable, TranslationDict};
use serde::{Deserialize, Serialize};
use strum::{AsRefStr, EnumIter, IntoEnumIterator};
use strum_macros::EnumString;

/// Coarse-grained part-of-speech categories used for filtering.
#[derive(
    Debug,
    PartialEq,
    Clone,
    Copy,
    Hash,
    EnumString,
    EnumIter,
    AsRefStr,
    Serialize,
    Deserialize,
    Ord,
    PartialOrd,
    Eq,
)]
#[repr(u8)]
pub enum PosSimple {
    #[strum(serialize = "adverb", serialize = "adv")]
    Adverb,
    #[strum(serialize = "auxilary", serialize = "aux")]
    Auxilary,
    #[strum(serialize = "conjunction", serialize = "conj")]
    Conjunction,
    #[strum(serialize = "noun", serialize = "n")]
    Noun,
    #[strum(serialize = "prefix", serialize = "pre")]
    Prefix,
    #[strum(serialize = "suffix", serialize = "suf")]
    Suffix,
    #[strum(serialize = "particle", serialize = "part")]
    Particle,
    #[strum(serialize = "sfx")]
    Sfx,
    #[strum(serialize = "verb", serialize = "v")]
    Verb,
    #[strum(serialize = "adjective", serialize = "adj")]
    Adjective,
    #[strum(serialize = "counter", serialize = "count")]
    Counter,
    #[strum(serialize = "expression", serialize = "expr")]
    Expr,
    #[strum(serialize = "interjection", serialize = "inter")]
    Interjection,
    #[strum(serialize = "pronoun", serialize = "pron")]
    Pronoun,
    #[strum(serialize = "numeric", serialize = "nr")]
    Numeric,
    #[strum(serialize = "transitive", serialize = "tr")]
    Transitive,
    #[strum(serialize = "intransitive", serialize = "itr")]
    Intransitive,
    #[strum(serialize = "unclassified", serialize = "unc")]
    Unclassified,
}

impl PosSimple {
    /// Iterates over all `PosSimple` variants.
    #[inline]
    pub fn iter() -> impl Iterator<Item = PosSimple> {
        <PosSimple as IntoEnumIterator>::iter()
    }

    /// Returns the canonical (first) strum string of the variant.
    #[inline]
    pub fn as_str(&self) -> &str {
        self.as_ref()
    }
}

impl TryFrom<i32> for PosSimple {
    type Error = ();

    /// Maps the numeric id back to a `PosSimple`.
    /// Must stay the inverse of `Into<i32>` below.
    // NOTE(review): id 14 is intentionally unassigned here and in `Into<i32>` —
    // presumably a removed variant; confirm before reusing the id.
    fn try_from(i: i32) -> Result<Self, Self::Error> {
        Ok(match i {
            0 => Self::Adverb,
            1 => Self::Auxilary,
            2 => Self::Conjunction,
            3 => Self::Noun,
            4 => Self::Prefix,
            5 => Self::Suffix,
            6 => Self::Particle,
            7 => Self::Sfx,
            8 => Self::Verb,
            9 => Self::Adjective,
            10 => Self::Counter,
            11 => Self::Expr,
            12 => Self::Interjection,
            13 => Self::Pronoun,
            15 => Self::Numeric,
            16 => Self::Unclassified,
            17 => Self::Intransitive,
            18 => Self::Transitive,
            _ => return Err(()),
        })
    }
}

impl Into<i32> for PosSimple {
    fn into(self) -> i32 {
        match self {
            Self::Adverb => 0,
            Self::Auxilary => 1,
            Self::Conjunction => 2,
            Self::Noun => 3,
            Self::Prefix => 4,
            Self::Suffix => 5,
            Self::Particle => 6,
            Self::Sfx => 7,
            Self::Verb => 8,
            Self::Adjective => 9,
            Self::Counter => 10,
            Self::Expr => 11,
            Self::Interjection => 12,
            Self::Pronoun => 13,
            Self::Numeric => 15,
            Self::Unclassified => 16,
            Self::Intransitive => 17,
            Self::Transitive => 18,
        }
    }
}

impl PartOfSpeech {
    /// Converts a `PartOfSpeech` tag to `PosSimple`
    ///
    /// Verbs may map to multiple entries (e.g. `[Verb, Transitive]`).
    pub fn to_pos_simple(&self) -> Vec<PosSimple> {
        let simple = match *self {
            PartOfSpeech::Adjective(_) | PartOfSpeech::AuxilaryAdj => PosSimple::Adjective,
            PartOfSpeech::Adverb | PartOfSpeech::AdverbTo => PosSimple::Adverb,
            PartOfSpeech::Auxilary => PosSimple::Auxilary,
            PartOfSpeech::Conjunction => PosSimple::Conjunction,
            PartOfSpeech::Counter => PosSimple::Counter,
            PartOfSpeech::Expr => PosSimple::Expr,
            PartOfSpeech::Interjection => PosSimple::Interjection,
            PartOfSpeech::Noun(n) => match n {
                NounType::Suffix => PosSimple::Suffix,
                _ => PosSimple::Noun,
            },
            PartOfSpeech::Numeric => PosSimple::Numeric,
            PartOfSpeech::Pronoun => PosSimple::Pronoun,
            PartOfSpeech::Prefix => PosSimple::Prefix,
            PartOfSpeech::Suffix => PosSimple::Suffix,
            PartOfSpeech::Particle => PosSimple::Particle,
            PartOfSpeech::Unclassified => PosSimple::Unclassified,
            PartOfSpeech::Sfx => PosSimple::Sfx,
            PartOfSpeech::Verb(_) | PartOfSpeech::AuxilaryVerb => PosSimple::Verb,
        };

        // Verbs additionally carry transitivity / noun-suru information
        if let PartOfSpeech::Verb(verb) = self {
            match verb {
                VerbType::Intransitive => vec![simple, PosSimple::Intransitive],
                VerbType::Transitive => vec![simple, PosSimple::Transitive],
                VerbType::Irregular(irr) => match irr {
                    IrregularVerb::NounOrAuxSuru => vec![simple, PosSimple::Noun],
                    _ => vec![simple],
                },
                _ => vec![simple],
            }
        } else {
            vec![simple]
        }
    }
}

/// Full JMdict part-of-speech tag.
#[derive(Debug, PartialEq, Clone, Copy, Serialize, PartialOrd, Ord, Eq, Deserialize, Hash)]
#[repr(u8)]
pub enum PartOfSpeech {
    // Adjectives
    Adjective(AdjectiveType),

    // Adverb
    Adverb,
    AdverbTo,

    // Auxilary
    Auxilary,
    AuxilaryAdj,
    AuxilaryVerb,

    // Other
    Conjunction,
    Counter,
    Expr,
    Interjection,
    Noun(NounType),
    Numeric,
    Pronoun,
    Prefix,
    Suffix,
    Particle,
    Unclassified,
    Sfx,

    // Verb
    Verb(VerbType),
}

impl PartOfSpeech {
    /// Returns true if [`self`] is a godan PartOfSpeech variant
    pub fn is_godan(&self) -> bool {
        if let PartOfSpeech::Verb(v) = self {
            matches!(v, VerbType::Godan(_))
        } else {
            false
        }
    }

    /// Returns true if [`self`] is an ichdan PartOfSpeech variant
    pub fn is_ichidan(&self) -> bool {
        if let PartOfSpeech::Verb(v) = self {
            match v {
                VerbType::Ichidan => true,
                _ => false,
            }
        } else {
            false
        }
    }
}

#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Ord, Eq, Serialize, Deserialize, Hash)]
#[repr(u8)]
pub enum VerbType {
    Nidan(NidanVerb),
    Yodan(VerbEnding),
    Godan(GodanVerbEnding),
    Irregular(IrregularVerb),
    Unspecified,
    Intransitive,
    Transitive,
    Ichidan,
    IchidanZuru,
    IchidanKureru,
    Kuru,
}

#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Ord, Eq, Serialize, Deserialize, Hash)]
#[repr(u8)]
pub enum AdjectiveType {
    PreNounVerb,
    /// I Adjective
    Keiyoushi,
    /// I Adjective conjugated like いい
    KeiyoushiYoiIi,
    Ku,
    Na,
    Nari,
    No,
    PreNoun,
    Shiku,
    Taru,
}

#[derive(Debug, Clone, Copy, PartialEq, Serialize, PartialOrd, Ord, Eq, Deserialize, Hash)]
#[repr(u8)]
pub enum NounType {
    Normal,
    Adverbial,
    Prefix,
    Suffix,
    Temporal,
}

#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, PartialOrd, Ord, Eq, Hash)]
#[repr(u8)]
pub enum IrregularVerb {
    Nu,
    Ru,
    NounOrAuxSuru,
    Suru,
    SuruSpecial,
    Su,
}

#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, PartialOrd, Ord, Eq, Hash)]
pub struct NidanVerb {
    class: VerbClass,
    ending: VerbEnding,
}

#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, PartialOrd, Ord, Eq, Hash)]
#[repr(u8)]
pub enum VerbClass {
    Upper,
    Lower,
    None,
}

#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, PartialOrd, Ord, Eq, Hash)]
#[repr(u8)]
pub enum VerbEnding {
    Bu,
    Dzu,
    Gu,
    Hu,
    Ku,
    Mu,
    Nu,
    Ru,
    Su,
    Tsu,
    U,
    Yu,
    Zu,
}

#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, PartialOrd, Ord, Eq, Hash)]
#[repr(u8)]
pub enum GodanVerbEnding {
    Bu,
    Gu,
    Ku,
    Mu,
    Nu,
    Ru,
    Su,
    Tsu,
    U,
    Aru,
    USpecial,
    Uru,
    RuIrreg,
    IkuYuku,
}

#[cfg(feature = "jotoba_intern")]
impl Translatable for PartOfSpeech {
    /// Returns the gettext message id, delegating to the inner type where present.
    fn get_id(&self) -> &'static str {
        match self {
            PartOfSpeech::Noun(noun_type) => noun_type.get_id(),
            PartOfSpeech::Sfx => "SoundFx",
            PartOfSpeech::Expr => "Expression",
            PartOfSpeech::Counter => "Counter",
            PartOfSpeech::Suffix => "Suffix",
            PartOfSpeech::Prefix => "Prefix",
            PartOfSpeech::Particle => "Particle",
            PartOfSpeech::Interjection => "Interjection",
            PartOfSpeech::Pronoun => "Pronoun",
            PartOfSpeech::Auxilary => "Auxilary",
            PartOfSpeech::Adjective(adj) => adj.get_id(),
            PartOfSpeech::Numeric => "Numeric",
            PartOfSpeech::AdverbTo => "Adverb-To",
            PartOfSpeech::Adverb => "Adverb",
            PartOfSpeech::Verb(verb) => verb.get_id(),
            PartOfSpeech::AuxilaryAdj => "Auxilary adjective",
            PartOfSpeech::AuxilaryVerb => "Auxilary Verb",
            PartOfSpeech::Conjunction => "Conjunction",
            PartOfSpeech::Unclassified => "Unclassified",
        }
    }

    fn gettext_custom(&self, dict: &TranslationDict, language: Option<Language>) -> String {
        match self {
            PartOfSpeech::Verb(verb) => verb.gettext_custom(dict, language),
            _ => self.gettext(dict, language).to_owned(),
        }
    }
}

#[cfg(feature = "jotoba_intern")]
impl Translatable for AdjectiveType {
    fn get_id(&self) -> &'static str {
        match self {
            AdjectiveType::PreNounVerb => "Noun or verb describing a noun",
            AdjectiveType::Keiyoushi => "I adjective",
            AdjectiveType::KeiyoushiYoiIi => "I adjective (conjugated like いい)",
            AdjectiveType::Ku => "Ku adjective",
            AdjectiveType::Na => "Na adjective",
            AdjectiveType::Nari => "Formal form of na adjective",
            AdjectiveType::No => "No adjective",
            AdjectiveType::PreNoun => "Pre noun adjective",
            AdjectiveType::Shiku => "Shiku adjective",
            AdjectiveType::Taru => "Taru adjective",
        }
    }
}

#[cfg(feature = "jotoba_intern")]
impl Translatable for NounType {
    fn get_id(&self) -> &'static str {
        match self {
            NounType::Normal => "Noun",
            NounType::Adverbial => "Noun adverbial",
            NounType::Prefix => "Prefix (noun)",
            NounType::Suffix => "Suffix (noun)",
            NounType::Temporal => "Temporal noun",
        }
    }
}

#[cfg(feature = "jotoba_intern")]
impl Translatable for VerbType {
    fn get_id(&self) -> &'static str {
        match *self {
            VerbType::Unspecified => "Unspecified verb",
            VerbType::Intransitive => "Intransitive verb",
            VerbType::Transitive => "Transitive verb",
            VerbType::Ichidan => "Ichidan verb",
            VerbType::IchidanZuru => "Ichidan zuru verb",
            VerbType::IchidanKureru => "Ichidan kureru verb",
            VerbType::Kuru => "Kuru verb",
            VerbType::Irregular(irregular) => irregular.get_id(),
            // NOTE(review): this catch-all also labels Nidan and Yodan verbs
            // as "Godan verb" — confirm whether that is intended.
            _ => "Godan verb",
        }
    }

    fn gettext_custom(&self, dict: &TranslationDict, language: Option<Language>) -> String {
        match self {
            VerbType::Irregular(i) => i.gettext_custom(dict, language),
            _ => self.gettext(dict, language).to_owned(),
        }
    }
}

#[cfg(feature = "jotoba_intern")]
impl Translatable for IrregularVerb {
    fn get_id(&self) -> &'static str {
        match self {
            IrregularVerb::Nu | IrregularVerb::Ru |
IrregularVerb::Su => { "Irregular verb with {} ending" } IrregularVerb::NounOrAuxSuru => "Noun taking suru", IrregularVerb::Suru => "Suru verb", IrregularVerb::SuruSpecial => "Suru special", } } fn gettext_custom(&self, dict: &TranslationDict, language: Option) -> String { match self { IrregularVerb::Nu => self.gettext_fmt(dict, &["nu"], language), IrregularVerb::Ru => self.gettext_fmt(dict, &["ru"], language), IrregularVerb::Su => self.gettext_fmt(dict, &["su"], language), IrregularVerb::NounOrAuxSuru | IrregularVerb::Suru | IrregularVerb::SuruSpecial => { self.gettext(dict, language).to_owned() } } } } /// VerbType into String impl Into for VerbType { fn into(self) -> String { match self { VerbType::Nidan(nidan) => { let n: String = nidan.into(); format!("{}{}", "v2", n) } VerbType::Yodan(yodan) => { let y: String = yodan.into(); format!("{}{}", "v4", y) } VerbType::Godan(godan) => { let g: String = godan.into(); format!("{}{}", "v5", g) } VerbType::Irregular(irreg) => irreg.into(), VerbType::Ichidan => "v1".to_owned(), VerbType::IchidanKureru => "v1-s".to_owned(), VerbType::Transitive => "vt".to_owned(), VerbType::Intransitive => "vi".to_owned(), VerbType::Kuru => "vk".to_owned(), VerbType::IchidanZuru => "vz".to_owned(), VerbType::Unspecified => "v-unspec".to_owned(), } } } /// Implement TryFrom for VerbType impl TryFrom<&str> for VerbType { type Error = (); fn try_from(value: &str) -> Result { if value.len() < 2 || value[..1] != *"v" { return Err(()); } Ok(match &value[1..2] { "1" => match value { "v1" => VerbType::Ichidan, "v1-s" => VerbType::IchidanKureru, _ => return Err(()), }, "2" => VerbType::Nidan(NidanVerb::try_from(value)?), // Nidan "4" => VerbType::Yodan(VerbEnding::try_from(&value[2..3])?), // Yodan "5" => VerbType::Godan(GodanVerbEnding::try_from(&value[2..])?), // Godan _ => match value { "vi" => VerbType::Intransitive, "vt" => VerbType::Transitive, "v-unspec" => VerbType::Unspecified, "vz" => VerbType::IchidanZuru, "vk" => VerbType::Kuru, _ => 
VerbType::Irregular(IrregularVerb::try_from(value)?), }, }) } } impl TryFrom<&str> for IrregularVerb { type Error = (); fn try_from(value: &str) -> Result { Ok(match value { "vn" => IrregularVerb::Nu, "vr" => IrregularVerb::Ru, "vs" => IrregularVerb::NounOrAuxSuru, "vs-i" => IrregularVerb::Suru, "vs-s" => IrregularVerb::SuruSpecial, "vs-c" => IrregularVerb::Su, _ => return Err(()), }) } } /// IrregularVerb into String impl Into for IrregularVerb { fn into(self) -> String { match self { IrregularVerb::Nu => "vn", IrregularVerb::Ru => "vr", IrregularVerb::NounOrAuxSuru => "vs", IrregularVerb::Suru => "vs-i", IrregularVerb::SuruSpecial => "vs-s", IrregularVerb::Su => "vs-c", } .to_string() } } /// GodanVerbEnding into String impl Into for GodanVerbEnding { fn into(self) -> String { match self { GodanVerbEnding::Aru => "aru", GodanVerbEnding::USpecial => "u-s", GodanVerbEnding::Uru => "uru", GodanVerbEnding::RuIrreg => "r-i", GodanVerbEnding::IkuYuku => "k-s", GodanVerbEnding::Bu => "b", GodanVerbEnding::Ku => "k", GodanVerbEnding::Gu => "g", GodanVerbEnding::Nu => "n", GodanVerbEnding::Mu => "m", GodanVerbEnding::Ru => "r", GodanVerbEnding::Su => "s", GodanVerbEnding::Tsu => "t", GodanVerbEnding::U => "u", } .to_string() } } /// Implement TryFrom for VerbEnding impl TryFrom<&str> for GodanVerbEnding { type Error = (); fn try_from(value: &str) -> Result { Ok(match value { "aru" => GodanVerbEnding::Aru, "u-s" => GodanVerbEnding::USpecial, "uru" => GodanVerbEnding::Uru, "r-i" => GodanVerbEnding::RuIrreg, "k-s" => GodanVerbEnding::IkuYuku, _ => match &value[0..1] { "b" => GodanVerbEnding::Bu, "k" => GodanVerbEnding::Ku, "g" => GodanVerbEnding::Gu, "n" => GodanVerbEnding::Nu, "m" => GodanVerbEnding::Mu, "r" => GodanVerbEnding::Ru, "s" => GodanVerbEnding::Su, "t" => GodanVerbEnding::Tsu, "u" => GodanVerbEnding::U, _ => return Err(()), }, }) } } /// VerbEnding into String impl Into for VerbEnding { fn into(self) -> String { match self { VerbEnding::Bu => "b", VerbEnding::Dzu 
=> "d", VerbEnding::Gu => "g", VerbEnding::Hu => "h", VerbEnding::Ku => "k", VerbEnding::Mu => "m", VerbEnding::Nu => "n", VerbEnding::Ru => "r", VerbEnding::Su => "s", VerbEnding::Tsu => "t", VerbEnding::U => "w", VerbEnding::Yu => "y", VerbEnding::Zu => "z", } .to_string() } } /// Implement TryFrom for VerbEnding impl TryFrom<&str> for VerbEnding { type Error = (); fn try_from(value: &str) -> Result { Ok(match value { "b" => VerbEnding::Bu, "d" => VerbEnding::Dzu, "g" => VerbEnding::Gu, "h" => VerbEnding::Hu, "k" => VerbEnding::Ku, "m" => VerbEnding::Mu, "n" => VerbEnding::Nu, "r" => VerbEnding::Ru, "s" => VerbEnding::Su, "t" => VerbEnding::Tsu, "w" => VerbEnding::U, "y" => VerbEnding::Yu, "z" => VerbEnding::Zu, _ => return Err(()), }) } } /// NidanVerb into String impl Into for NidanVerb { fn into(self) -> String { let class = match self.class { VerbClass::Upper => "k", VerbClass::Lower | VerbClass::None => "s", }; let ending: String = self.ending.into(); format!("{}-{}", ending, class) } } /// Implement TryFrom for NidanVerb impl TryFrom<&str> for NidanVerb { type Error = (); fn try_from(value: &str) -> Result { if value.len() < 3 || value[..1] != *"v" { return Err(()); } if value == "v2a-s" { return Ok(NidanVerb { ending: VerbEnding::U, class: VerbClass::None, }); } let class: VerbClass = match &value[4..5] { "k" => VerbClass::Upper, "s" => VerbClass::Lower, _ => return Err(()), }; let ending = VerbEnding::try_from(&value[2..3])?; Ok(NidanVerb { class, ending }) } } /// NounType into String impl Into for NounType { fn into(self) -> String { match self { NounType::Normal => "n", NounType::Adverbial => "n-adv", NounType::Prefix => "n-pref", NounType::Suffix => "n-suf", NounType::Temporal => "n-t", } .to_string() } } /// Implement TryFrom for NounType impl TryFrom<&str> for NounType { type Error = (); fn try_from(value: &str) -> Result { Ok(match &value[2..] 
{ "adv" => NounType::Adverbial, "pref" => NounType::Prefix, "suf" => NounType::Suffix, "t" => NounType::Temporal, _ => return Err(()), }) } } impl Into for AdjectiveType { fn into(self) -> String { match self { AdjectiveType::PreNounVerb => "adj-f", AdjectiveType::Keiyoushi => "adj-i", AdjectiveType::KeiyoushiYoiIi => "adj-ix", AdjectiveType::Ku => "adj-ku", AdjectiveType::Na => "adj-na", AdjectiveType::Nari => "adj-nari", AdjectiveType::No => "adj-no", AdjectiveType::PreNoun => "adj-pn", AdjectiveType::Shiku => "adj-shiku", AdjectiveType::Taru => "adj-t", } .to_string() } } /// Implement TryFrom for AdjectiveType impl TryFrom<&str> for AdjectiveType { type Error = (); fn try_from(value: &str) -> Result { Ok(match value[4..].as_ref() { "f" => AdjectiveType::PreNounVerb, "i" => AdjectiveType::Keiyoushi, "ix" => AdjectiveType::KeiyoushiYoiIi, "ku" => AdjectiveType::Ku, "na" => AdjectiveType::Na, "nari" => AdjectiveType::Nari, "no" => AdjectiveType::No, "pn" => AdjectiveType::PreNoun, "shiku" => AdjectiveType::Shiku, "t" => AdjectiveType::Taru, _ => return Err(()), }) } } impl Into for PartOfSpeech { fn into(self) -> String { if let PartOfSpeech::Noun(noun) = self { return noun.into(); } match self { PartOfSpeech::Adjective(adj) => adj.into(), PartOfSpeech::Noun(noun) => noun.into(), PartOfSpeech::Verb(verb) => verb.into(), _ => match self { PartOfSpeech::Pronoun => "pn", PartOfSpeech::Adverb => "adv", PartOfSpeech::Auxilary => "aux", PartOfSpeech::Counter => "ctr", PartOfSpeech::Conjunction => "conj", PartOfSpeech::Expr => "exp", PartOfSpeech::Interjection => "int", PartOfSpeech::Numeric => "num", PartOfSpeech::Particle => "prt", PartOfSpeech::Suffix => "suf", PartOfSpeech::Unclassified => "unc", PartOfSpeech::AdverbTo => "adv-to", PartOfSpeech::AuxilaryAdj => "aux-adj", PartOfSpeech::AuxilaryVerb => "aux-v", PartOfSpeech::Prefix => "pref", PartOfSpeech::Sfx => "sfx", _ => unreachable!(), // already checked above } .to_string(), } } } /// Implement TryFrom for 
/// An HTML border for rendering pitches
#[repr(u8)]
pub enum Border {
    Left,
    Right,
    Top,
    Bottom,
}

impl Border {
    /// Single-character CSS class for this border side.
    #[inline]
    pub fn get_class(&self) -> char {
        match self {
            Border::Left => 'l',
            Border::Right => 'r',
            Border::Top => 't',
            Border::Bottom => 'b',
        }
    }

    /// Overline (`Top`) for a high pitch, underline (`Bottom`) otherwise.
    #[inline]
    pub fn horizontal(high: bool) -> Border {
        match high {
            true => Border::Top,
            false => Border::Bottom,
        }
    }
}

/// Helper to build Border class strings
pub struct BorderBuilder {
    inner: String,
}

impl BorderBuilder {
    /// Starts a class string with `initial`'s class character.
    #[inline]
    pub fn new(initial: Border) -> Self {
        let mut inner = String::with_capacity(3);
        inner.push(initial.get_class());
        Self { inner }
    }

    /// Appends another border class, separated by a space.
    #[inline]
    pub fn add(&mut self, border: Border) {
        self.inner.push(' ');
        self.inner.push(border.get_class());
    }

    /// Consumes the builder and returns the finished class string.
    #[inline]
    pub fn build(self) -> String {
        self.inner
    }
}
lib/types/src/jotoba/words/pitch/mod.rs ================================================ pub mod raw_data; pub mod border; use jp_utils::JapaneseExt; use serde::{Deserialize, Serialize}; /// Owned pitch entry of a word #[derive(Clone, Serialize, Deserialize, Debug)] pub struct Pitch { pub parts: Vec, } impl Pitch { pub fn new(kana: &str, drop: u8) -> Option { let mut kana_items = split_kana(kana).collect::>(); kana_items.push(""); let syllable_count = kana_items.len(); if syllable_count == 0 || drop > 6 { return None; } let mut kana_items = kana_items.into_iter(); let first_kana = kana_items.next()?; if drop == 0 || drop == 1 { if syllable_count == 1 { let inner = vec![PitchPart::new(first_kana, drop == 1)]; return Some(Self::new_raw(inner)); } else { let part1 = PitchPart::new(first_kana, drop == 1); let part2 = PitchPart::new(&kana[first_kana.bytes().len()..], drop == 0); return Some(Self::new_raw(vec![part1, part2])); } } let up: usize = kana_items .by_ref() .take((drop - 1) as usize) .map(|i| i.bytes().len()) .sum(); let parts = vec![ PitchPart::new(first_kana, false), PitchPart::new( &kana[first_kana.bytes().len()..first_kana.bytes().len() + up], true, ), PitchPart::new(&kana[first_kana.bytes().len() + up..], false), ]; return Some(Pitch::new_raw(parts)); } #[inline] fn new_raw(parts: Vec) -> Self { Self { parts } } /// Get a reference to the pitch's parts. 
#[inline] pub fn parts(&self) -> &[PitchPart] { self.parts.as_ref() } /// Render helper for the template #[cfg(feature = "jotoba_intern")] pub fn render(&self) -> impl Iterator { use self::border::{Border, BorderBuilder}; let mut iter = self.parts.iter().enumerate(); std::iter::from_fn(move || { let (pos, pitch_part) = iter.next()?; if pitch_part.part.is_empty() { // Don't render under/overline for empty character -- handles the case where the // pitch changes from the end of the word to the particle return Some((String::new(), "")); } let h_bord = Border::horizontal(pitch_part.high); let mut b_builder = BorderBuilder::new(h_bord); if pos != self.parts.len() - 1 { b_builder.add(Border::Right); } let classes = b_builder.build(); let part_str = pitch_part.part.as_str(); Some((classes, part_str)) }) } } /// A single, owned part of a whole pitch entry for a word #[derive(Clone, Serialize, Deserialize, Debug)] pub struct PitchPart { pub part: String, pub high: bool, } impl PitchPart { #[inline] pub fn new(part: S, high: bool) -> Self { Self { part: part.to_string(), high, } } } /// Returns an iterator over all kana characters. 
The reason for Item to be &str is that 'きゅう' /// gets split up into ["きゅ", "う"] which can't be represented with only one char pub fn split_kana(inp: &str) -> impl Iterator { let mut char_indices = inp.char_indices().peekable(); std::iter::from_fn(move || { let (start_idx, _) = char_indices.next()?; while let Some(&(next_idx, chr)) = char_indices.peek() { if !chr.is_small_kana() { return Some(&inp[start_idx..next_idx]); } char_indices.next(); } Some(&inp[start_idx..]) }) } #[cfg(test)] mod test { use super::*; #[test] fn test_split_katakana_small() { let inp = "ファイナル"; let out = split_kana(inp).collect::>(); assert_eq!(out, vec!["ファ", "イ", "ナ", "ル"]); } #[test] fn test_split_kana_small() { let inp = "きょうかしょ"; let out = split_kana(inp).collect::>(); assert_eq!(out, vec!["きょ", "う", "か", "しょ"]); } #[test] fn test_split_kana() { let inp = "これがすき"; let out = split_kana(inp).collect::>(); assert_eq!(out, vec!["こ", "れ", "が", "す", "き"]); } #[test] fn test_split_kana2() { let inp = ""; let out = split_kana(inp).collect::>(); let empty: Vec<&str> = Vec::new(); assert_eq!(out, empty); } } ================================================ FILE: lib/types/src/jotoba/words/pitch/raw_data.rs ================================================ use serde::{Deserialize, Serialize}; use std::fmt::Debug; /// Store for pitch values. There are max 4 pitch values with each 3 bits. 
This /// is why we store it efficiently in a u16 #[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, Default)] pub struct PitchValues { raw: u16, } impl PitchValues { pub fn new(values: &[u8]) -> Self { assert!(values.len() <= 4); let mut raw: u16 = 0; for (pos, val) in values.iter().enumerate() { assert!(*val <= 6); let shift = pos as u16 * 3; raw |= (*val as u16) << shift; } raw |= (values.len() as u16) << 12; Self { raw } } #[inline] pub fn is_empty(&self) -> bool { self.raw == 0 } #[inline] pub fn count(&self) -> u8 { (self.raw >> 12) as u8 } #[inline] pub fn get(&self, pos: u8) -> Option { (pos < self.count()).then(|| (self.raw >> (pos as u16 * 3)) as u8 & 0b00000111) } #[inline] pub fn iter(&self) -> impl Iterator + '_ { (0..self.count()).map(|i| self.get(i).unwrap()) } } impl Debug for PitchValues { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "[")?; for (pos, p) in self.iter().enumerate() { if pos > 0 { write!(f, "|")?; } write!(f, "{p}")?; } write!(f, "]") } } #[cfg(test)] mod test { use super::*; #[test] fn test_pitch_value() { assert_eq!(PitchValues::new(&[0]).count(), 1); assert_eq!(PitchValues::new(&[0]).get(0), Some(0)); assert_eq!(PitchValues::new(&[0]).get(1), None); assert_eq!(PitchValues::new(&[6, 6]).count(), 2); assert_eq!(PitchValues::new(&[6, 6]).get(0), Some(6)); assert_eq!(PitchValues::new(&[6, 6]).get(1), Some(6)); assert_eq!(PitchValues::new(&[1, 6, 0]).count(), 3); assert_eq!(PitchValues::new(&[1, 6, 0]).get(2), Some(0)); assert_eq!(PitchValues::new(&[]).count(), 0); } } ================================================ FILE: lib/types/src/jotoba/words/priority.rs ================================================ use serde::{Deserialize, Serialize}; use std::convert::TryFrom; /// Priority indicator of kanji/reading element #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Hash, Eq)] #[repr(u8)] pub enum Priority { News(u8), Ichi(u8), Spec(u8), Gai(u8), Nf(u8), } impl Into for 
Priority { fn into(self) -> String { match self { Priority::News(v) => format!("news{}", v), Priority::Ichi(v) => format!("ichi{}", v), Priority::Spec(v) => format!("spec{}", v), Priority::Gai(v) => format!("gai{}", v), Priority::Nf(v) => format!("nf{}", v), } } } impl TryFrom<&str> for Priority { type Error = (); fn try_from(value: &str) -> Result { if let Some(end) = value.strip_prefix("news") { return Ok(Priority::News(end.parse().map_err(|_| ())?)); } if let Some(end) = value.strip_prefix("ichi") { return Ok(Priority::Ichi(end.parse().map_err(|_| ())?)); } if let Some(end) = value.strip_prefix("spec") { return Ok(Priority::Spec(end.parse().map_err(|_| ())?)); } if let Some(end) = value.strip_prefix("gai") { return Ok(Priority::Gai(end.parse().map_err(|_| ())?)); } if let Some(end) = value.strip_prefix("nf") { return Ok(Priority::Nf(end.parse().map_err(|_| ())?)); } Err(()) } } #[cfg(test)] mod test { use super::*; use std::convert::TryFrom; #[test] fn test_priority_ichi() { let s = Priority::try_from("ichi1"); assert!(s.is_ok()); let s = s.unwrap(); assert_eq!(s, Priority::Ichi(1)); let p: String = s.into(); assert_eq!(p, "ichi1"); let s = Priority::try_from("ichi"); assert!(s.is_err()); } #[test] fn test_priority_nf() { let s = Priority::try_from("nf10"); assert!(s.is_ok()); let s = s.unwrap(); assert_eq!(s, Priority::Nf(10)); let p: String = s.into(); assert_eq!(p, "nf10"); let s = Priority::try_from("nf4"); assert!(s.is_ok()); let s = s.unwrap(); assert_eq!(s, Priority::Nf(4)); let p: String = s.into(); assert_eq!(p, "nf4"); let s = Priority::try_from("nf"); assert!(s.is_err()); } #[test] fn test_priority_news() { let s = Priority::try_from("news10"); assert!(s.is_ok()); let s = s.unwrap(); assert_eq!(s, Priority::News(10)); let p: String = s.into(); assert_eq!(p, "news10"); let s = Priority::try_from("news"); assert!(s.is_err()); } } ================================================ FILE: lib/types/src/jotoba/words/reading/iter.rs 
================================================ use super::Reading; use crate::jotoba::words::Dict; /// Iterator over all readings of a word pub struct ReadingIter<'a> { reading: &'a Reading, allow_kana: bool, did_kanji: bool, did_kana: bool, alternative_pos: u8, } impl<'a> ReadingIter<'a> { #[inline] pub(crate) fn new(reading: &'a Reading, allow_kana: bool) -> Self { Self { reading, allow_kana, did_kana: false, did_kanji: false, alternative_pos: 0, } } } impl<'a> Iterator for ReadingIter<'a> { type Item = &'a Dict; fn next(&mut self) -> Option { if !self.did_kana && self.allow_kana { self.did_kana = true; return Some(&self.reading.kana); } if !self.did_kanji && self.reading.kanji.is_some() { self.did_kanji = true; return Some(self.reading.kanji.as_ref().unwrap()); } let i = self .reading .alternative .get(self.alternative_pos as usize)?; self.alternative_pos += 1; Some(i) } } ================================================ FILE: lib/types/src/jotoba/words/reading/mod.rs ================================================ pub mod iter; pub use iter::ReadingIter; use super::Dict; use jp_utils::JapaneseExt; use serde::{Deserialize, Serialize}; /// Various readings of a word #[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize, Hash, Eq)] pub struct Reading { pub kana: Dict, pub kanji: Option, pub alternative: Vec, } impl Reading { /// Returns the preferred word-reading of a `Reading` #[inline] pub fn get_reading(&self) -> &Dict { self.kanji.as_ref().unwrap_or(&self.kana) } /// Returns an iterator over all reading elements #[inline] pub fn iter(&self, allow_kana: bool) -> ReadingIter<'_> { ReadingIter::new(self, allow_kana) } /// Return `true` if reading represents a katakana only word #[inline] pub fn is_katakana(&self) -> bool { self.kana.reading.is_katakana() && self.kanji.is_none() } } ================================================ FILE: lib/types/src/jotoba/words/sense.rs ================================================ use 
crate::jotoba::language::Language; use super::{ dialect::Dialect, field::Field, foreign_language::ForeignLanguage, gtype::GType, misc::Misc, part_of_speech::{PartOfSpeech, PosSimple}, Word, }; use serde::{Deserialize, Serialize}; #[cfg(feature = "jotoba_intern")] use localization::{language::Language as LocLanguage, traits::Translatable, TranslationDict}; /// A single sense for a word. Represents one language, /// one misc item and 1..n glosses #[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize, Hash)] pub struct Sense { pub id: u8, pub misc: Option, pub field: Option, pub dialect: Option, pub glosses: Vec, pub xref: Option, pub antonym: Option, pub information: Option, pub part_of_speech: Vec, pub language: Language, pub example_sentence: Option, pub gairaigo: Option, } #[derive(Debug, Default, Clone, PartialEq, Deserialize, Serialize, Hash)] pub struct Gairaigo { pub language: ForeignLanguage, pub fully_derived: bool, pub original: String, } impl Eq for Sense {} /// A gloss value represents one word in the /// translated language. 
/// Converts sense and gloss id to a single u16
/// (sense id in the high byte, gloss id in the low byte)
#[inline]
pub fn to_unique_id(sense_id: u8, gloss_id: u8) -> u16 {
    u16::from_be_bytes([sense_id, gloss_id])
}

/// Converts u16 to seq and gloss id
#[inline]
pub fn from_unique_id(id: u16) -> (u8, u8) {
    let [sense_id, gloss_id] = id.to_be_bytes();
    (sense_id, gloss_id)
}
self.get_information_string(dict, language); let xref = self.get_xref(); let antonym = self.get_antonym(); let dialect = self.dialect; if xref.is_none() && info_str.is_none() && antonym.is_none() && self.gairaigo.is_none() { None } else { let gairaigo_txt = self.get_gairaigo(dict, language); Some((info_str, xref, antonym, dialect, gairaigo_txt)) } } fn get_gairaigo(&self, dict: &TranslationDict, language: LocLanguage) -> Option { self.gairaigo.as_ref().map(|gairaigo| { let lang = gairaigo .language .pgettext(dict, "foreign_lang", Some(language)); dict.gettext_fmt("From {}: {}", &[lang, &gairaigo.original], Some(language)) }) } /// Return human readable information about a gloss pub fn get_information_string( &self, dict: &TranslationDict, language: LocLanguage, ) -> Option { use itertools::Itertools; let arr: [Option; 3] = [ self.misc .map(|i| i.gettext(dict, Some(language)).to_owned()), self.field.map(|i| i.gettext_custom(dict, Some(language))), self.information.clone(), ]; let res = arr .iter() .filter_map(|i| i.is_some().then(|| i.as_ref().unwrap())) .collect::>(); if res.is_empty() { return None; } if self.xref.is_some() || self.antonym.is_some() { Some(format!("{}.", res.iter().join(", "))) } else { Some(res.iter().join(", ")) } } } /// Iterator over all Senses and its glosses pub struct SenseGlossIter<'a> { word: &'a Word, sense_pos: usize, gloss_pos: usize, } impl<'a> SenseGlossIter<'a> { #[inline] pub(super) fn new(word: &'a Word) -> Self { SenseGlossIter { word, sense_pos: 0, gloss_pos: 0, } } } impl<'a> Iterator for SenseGlossIter<'a> { type Item = (&'a Sense, &'a Gloss, u16); fn next(&mut self) -> Option { let senses = &self.word.senses; if senses.len() <= self.sense_pos { return None; } let sense = &senses[self.sense_pos]; assert!(!sense.glosses.is_empty()); let gloss = &sense.glosses[self.gloss_pos]; self.gloss_pos += 1; if self.gloss_pos >= sense.glosses.len() { self.gloss_pos = 0; self.sense_pos += 1; } let id = to_unique_id(sense.id, gloss.id); 
Some((sense, gloss, id)) } } #[cfg(test)] mod test { use super::*; fn make_gloss(word: &str) -> Gloss { Gloss { gloss: word.to_string(), ..Default::default() } } fn make_word(senses: &[&[&str]]) -> Word { let built_senses = senses .iter() .map(|sense| Sense { glosses: sense.iter().map(|i| make_gloss(i)).collect(), ..Default::default() }) .collect::>(); Word { senses: built_senses, ..Default::default() } } #[test] fn test_sense_gloss_iter() { let word_empty = make_word(&[]); assert_eq!(word_empty.sense_gloss_iter().next(), None); let test_word = |data: &[&[&str]]| { let word1 = make_word(data); let mut iter1 = word1.sense_gloss_iter(); for i in data.into_iter().map(|i| i.iter()).flatten() { assert_eq!(iter1.next().unwrap().1.gloss.as_str(), *i); } assert_eq!(iter1.next(), None); }; let words = vec![ vec![&["gloss0_0"][..]], vec![&["gloss0_0"][..], &["gloss1_0"][..]], vec![&["gloss0_0", "gloss0_1"][..], &["gloss1_0", "gloss1_1"][..]], ]; for word in words { test_word(&word); } } #[test] fn test_unique_id() { let pairs = &[(1, 70), (10, 6), (0, 0), (255, 255), (1, 2)]; for (seq, gloss) in pairs { let enc = to_unique_id(*seq, *gloss); let (seq_res, gloss_res) = from_unique_id(enc); assert_eq!(*seq, seq_res); assert_eq!(*gloss, gloss_res); } } } ================================================ FILE: lib/types/src/lib.rs ================================================ /// Contains raw data structures used for parsing and generating the 'real' resources #[cfg(feature = "raw_types")] pub mod raw; /// Contains all information holding structures for jotoba resources pub mod jotoba; /// Contains all structures and informations required for the API #[cfg(feature = "api")] pub mod api; ================================================ FILE: lib/types/src/raw/jmdict/mod.rs ================================================ use crate::jotoba::{ language::Language, words::{ dialect::Dialect, field::Field, gtype::GType, information::Information, misc::Misc, 
part_of_speech::PartOfSpeech, priority::Priority, sense::Gairaigo, }, }; use serde::{Deserialize, Serialize}; /// An dict entry. Represents one word, phrase or expression #[derive(Debug, Default, Clone)] pub struct Entry { pub sequence: u32, /// Different readings of a word pub elements: Vec, /// Translations into various languages pub senses: Vec, } /// A single element for an entry. Defines reading, kanji and additional /// information for the japanese word #[derive(Debug, Default, Clone)] pub struct EntryElement { /// Is kanji reading pub kanji: bool, /// The reading pub value: String, pub priorities: Vec, pub reading_info: Vec, pub no_true_reading: bool, } /// A single 'sense' item for an entry #[derive(Debug, Default, Clone)] pub struct EntrySense { pub id: u8, pub glosses: Vec, pub misc: Option, pub part_of_speech: Vec, pub antonym: Option, pub field: Option, pub xref: Option, pub dialect: Option, pub information: Option, pub gairaigo: Option, pub example_sentence: Option, } impl EntrySense { pub fn clear(&mut self) { self.glosses.clear(); if let Some(ref mut ant) = self.antonym { ant.clear(); self.antonym = None; } if let Some(ref mut information) = self.information { information.clear(); self.information = None; } if let Some(ref mut xref) = self.xref { xref.clear(); self.xref = None; } self.field = None; self.dialect = None; self.misc = None; self.part_of_speech.clear(); self.example_sentence = None; self.gairaigo = None; } } #[derive(Debug, Default, Clone, PartialEq, Deserialize, Serialize, Hash)] pub struct Translation { pub language: Language, pub value: String, } /// A single gloss entry. #[derive(Debug, Clone, PartialEq)] pub struct GlossValue { pub language: Language, pub g_type: Option, pub value: String, } ================================================ FILE: lib/types/src/raw/jmnedict/mod.rs ================================================ use crate::jotoba::names::name_type::NameType; /// An dict entry. 
Represents one word, phrase or expression #[derive(Default)] pub struct NameEntry { pub sequence: i32, pub kana_element: String, pub kanji_element: Option, pub transcription: String, pub name_type: Option>, pub xref: Option, } ================================================ FILE: lib/types/src/raw/kanjidict/mod.rs ================================================ /// An kanji character. Represents one Kanji #[derive(Default, Clone, Debug)] pub struct Character { pub literal: char, pub on_readings: Vec, pub kun_readings: Vec, pub chinese_readings: Vec, pub korean_romanized: Vec, pub korean_hangul: Vec, pub vietnamese: Vec, pub meaning: Vec, pub grade: Option, pub stroke_count: u8, pub variant: Vec, pub frequency: Option, pub jlpt: Option, pub natori: Vec, pub radical: Option, } ================================================ FILE: lib/types/src/raw/mod.rs ================================================ pub mod jmdict; pub mod jmnedict; pub mod kanjidict; ================================================ FILE: lib/utils/Cargo.toml ================================================ [package] name = "utils" version = "0.1.0" authors = ["jojii "] edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] itertools = "0.11.0" rand = "0.8.5" sabi = { git = "https://github.com/yuk1ty/sabi" } ================================================ FILE: lib/utils/src/binary_search.rs ================================================ use std::cmp::Ordering; pub struct ResultIter<'a, C, B, T> where C: FnMut(&T) -> Ordering + Copy, B: BinarySearchable, { cmp_fn: C, first: Option, item_pos: usize, find: &'a B, } impl<'a, C, B, T> Iterator for ResultIter<'a, C, B, T> where C: FnMut(&T) -> Ordering + Copy, B: BinarySearchable, { type Item = T; #[inline] fn next(&mut self) -> Option { let curr_item_pos = self.first? 
+ self.item_pos; if curr_item_pos >= self.find.len() { return None; } let item = self.find.get(curr_item_pos); if (self.cmp_fn)(&item) == Ordering::Equal { self.item_pos += 1; return Some(item); } None } } impl<'a, C, B, T> ResultIter<'a, C, B, T> where C: FnMut(&T) -> Ordering + Copy, B: BinarySearchable, { #[inline] pub(crate) fn new(cmp: C, search: &'a B, first: Option) -> Self { Self { cmp_fn: cmp, first, item_pos: 0, find: search, } } } /// A trait providing binary search for all `get` and `len` implementing types. Additionally /// `search` can be used to retrieve all matching items in sorted order. pub trait BinarySearchable: Sized { type Item: Sized; fn get(&self, pos: usize) -> Self::Item; fn len(&self) -> usize; fn is_empty(&self) -> bool { self.len() == 0 } /// Returns an iterator over each matching result fn search(&self, cmp: C) -> ResultIter<'_, C, Self, Self::Item> where C: FnMut(&Self::Item) -> Ordering + Copy, { let first_item = self.find_first(cmp); ResultIter::new(cmp, self, first_item) } fn binary_search_by<'a, F>(&'a self, mut f: F) -> Option where F: FnMut(&Self::Item) -> Ordering, { let mut size = self.len(); let mut left = 0; let mut right = size; while left < right { let mid = left + size / 2; let cmp = f(&self.get(mid)); if cmp == Ordering::Less { left = mid + 1; } else if cmp == Ordering::Greater { right = mid; } else { return Some(mid); } size = right - left; } None } /// Finds first matching item fn find_first(&self, mut cmp: C) -> Option where C: FnMut(&Self::Item) -> Ordering, { // Find using binary search. 
If multiple results found (which is very likely the case in // our implementation), a random item of the matching ones will be found let random_index = self.binary_search_by(|a| cmp(a))?; let mut curr_pos = random_index.saturating_sub(100); loop { if cmp(&self.get(curr_pos)) != Ordering::Equal { loop { curr_pos += 1; if cmp(&self.get(curr_pos)) == Ordering::Equal { break; } } break Some(curr_pos); } if curr_pos == 0 { break None; } curr_pos = curr_pos.saturating_sub(200); } } } ================================================ FILE: lib/utils/src/korean.rs ================================================ /// Returns true if `c` is a hangul character #[inline] pub fn is_hangul(c: char) -> bool { (c >= '\u{AC00}' && c <= '\u{D7AF}') || (c >= '\u{1100}' && c <= '\u{11FF}') || (c >= '\u{3130}' && c <= '\u{321E}') } sabi::sabi! { /// Returns true if `c` is a hangul character #[inline] 公開 関数 is_hangul_str(ハングルの文字列: &str) -> bool{ !ハングルの文字列.chars().any(|i| !is_hangul(i)) } } ================================================ FILE: lib/utils/src/lib.rs ================================================ pub mod binary_search; pub mod korean; use itertools::Itertools; use rand::{distributions::Alphanumeric, thread_rng, Rng}; use std::cmp::Ordering; /// Return true if both slices have the same elments without being stored to be in the same order pub fn same_elements(v1: &[T], v2: &[T]) -> bool where T: PartialEq, { if v1.len() != v2.len() { return false; } for i in v1 { if !v2.contains(i) { return false; } } true } /// Return true if `v1` ⊆ `v2` pub fn part_of(v1: &[T], v2: &[T]) -> bool where T: PartialEq, { if v1.len() > v2.len() || v1.is_empty() { return false; } for i in v1 { if !v2.contains(i) { return false; } } true } /// Get the relative order of two elements within a vector requires that a, b being element of vec pub fn get_item_order(vec: &[T], a: &T, b: &T) -> Option where T: PartialEq, { if a == b { return Some(Ordering::Equal); } for i in vec { if *i == *a { return 
Some(Ordering::Less);
        }
        if *i == *b {
            return Some(Ordering::Greater);
        }
    }
    None
}

/// Returns the real amount of characters in a string
#[inline]
pub fn real_string_len<S: AsRef<str>>(s: S) -> usize {
    // We should probably use grapheme clusters here; this counts Unicode
    // scalar values.
    s.as_ref().chars().count()
}

/// Returns an antisymmetric ordering of [`a`] and [`b`] where `a == true` < `b == true`
/// Example:
///
/// let a = true;
/// let b = false;
/// assert_eq!(bool_ord(a, b), Ordering::Less);
#[inline]
pub fn bool_ord(a: bool, b: bool) -> Ordering {
    if a && !b {
        Ordering::Less
    } else if !a && b {
        Ordering::Greater
    } else {
        Ordering::Equal
    }
}

/// Returns `None` if the vec is empty or Some(Vec) if not
#[inline]
pub fn to_option<T>(vec: Vec<T>) -> Option<Vec<T>> {
    (!vec.is_empty()).then(|| vec)
}

/// Returns an ordering based on the option variants.
/// Ordering: Some < None
/// Returns `None` when both options have the same variant.
pub fn option_order<T>(a: &Option<T>, b: &Option<T>) -> Option<Ordering> {
    if a.is_some() && b.is_none() {
        Some(Ordering::Less)
    } else if a.is_none() && b.is_some() {
        Some(Ordering::Greater)
    } else {
        None
    }
}

/// Remove duplicates from a vector and return a newly allocated one using a func to compare both
/// items. This doesn't need the source vector to be sorted unlike `.dedup()`.
/// Therefore it's heavier in workload
pub fn remove_dups_by<T, F>(inp: Vec<T>, eq: F) -> Vec<T>
where
    T: PartialEq,
    F: Fn(&T, &T) -> bool,
{
    let mut new: Vec<T> = Vec::new();
    for item in inp {
        if !contains(&new, &item, &eq) {
            new.push(item)
        }
    }
    new
}

/// Returns true if `inp` contains an element comparing equal to `item`
/// under the custom equality function `eq`.
pub fn contains<T, F>(inp: &[T], item: &T, eq: F) -> bool
where
    F: Fn(&T, &T) -> bool,
{
    inp.iter().any(|i| eq(i, item))
}

/// Remove duplicates from a vector and return a newly allocated one. This doesn't need the source
/// vector to be sorted unlike `.dedup()`. Therefore it's heavier in workload
pub fn remove_dups<T>(inp: Vec<T>) -> Vec<T>
where
    T: PartialEq,
{
    let mut new = vec![];
    for item in inp {
        if !new.contains(&item) {
            new.push(item)
        }
    }
    new
}

/// Returns an iterator over bools for each [`substr`] within [`text`] with the value `true` if the
/// given substr occurence is within [`open`] and [`close`] or not
///
/// Example:
///
/// is_surrounded_by(r#"this "is" an example"#, "is", '"','"')
///
/// => will return an iterator over [ false, true ]
pub fn is_surrounded_by<'a>(
    text: &'a str,
    substr: &'a str,
    open: char,
    close: char,
) -> impl Iterator<Item = bool> + 'a {
    // Counter for amount of nested brackets
    let mut counter = 0;
    let mut text_iter = text.char_indices().multipeek();

    std::iter::from_fn(move || {
        // Degenerate case: no valid bracketing is possible.
        // BUGFIX: the guard was inverted (`substr.len() + 2 <= text.len()`),
        // which bailed out precisely when a surrounded occurrence *could*
        // exist, making the iterator always empty for normal inputs.
        if substr.len() + 2 > text.len() || substr.contains(open) || substr.contains(close) {
            return None;
        }

        'b: while let Some((_, c)) = text_iter.next() {
            if c == open {
                counter += 1;
                continue;
            }
            if c == close {
                counter -= 1;
                continue;
            }

            // Match each character of [`substr`] against the next appearing characters in [`text`] by
            // peeking [`text_iter`] Aka string matching
            for (pos, sub_char) in substr.chars().enumerate() {
                let text_char = if pos == 0 {
                    // Check first substr char against current char
                    c
                } else {
                    // For later appearing characters, peek into the future
                    match text_iter.peek().map(|i| i.1) {
                        Some(c) => c,
                        None => return None,
                    }
                };

                // On the first not matching character, continue loop and reset peek
                if sub_char.to_ascii_lowercase() != text_char.to_ascii_lowercase() {
                    text_iter.reset_peek();
                    continue 'b;
                }
            }

            // Skip peeked items if matching substr was found
            text_iter.reset_peek();
            for _ in 0..substr.chars().count() - 1 {
                text_iter.next();
            }

            // Only reaches this part if a matching substring was found
            return Some(counter > 0);
        }
        None
    })
}

/// Removes all trailing space characters from `s` in place and returns it.
#[inline]
pub fn trim_string_end(mut s: String) -> String {
    while s.ends_with(' ') {
        s.pop();
    }
    s
}

/// Returns true if [`s`] represents [`c`]
pub fn char_eq_str(c: char, s: &str) -> bool {
    let mut chars = s.chars();
    let is = chars.next().map(|i| i == c).unwrap_or_default();
    is && chars.next().is_none()
}

// `sabi` lets Rust be written with Japanese keywords
// (公開 = pub, 関数 = fn, 束縛 可変 = let mut, マッチ = match, 文字列 = String).
sabi::sabi! {
    /// Makes the first character to uppercase and returns a newly owned string
    公開 関数 first_letter_upper(s: &str) -> 文字列{
        束縛 可変 c = s.chars();
        マッチ c.next(){
            ない => 文字列::新(),
            ある(f) => f.to_uppercase().chain(c).collect(),
        }
    }
}

/// Returns a random alpha numeric string with the length of [`len`]
#[inline]
pub fn rand_alpha_numeric(len: usize) -> String {
    thread_rng()
        .sample_iter(&Alphanumeric)
        .take(len)
        .map(char::from)
        .collect()
}

/// Formats romaji text by replacing doubled "n" (and "n'") with ん.
#[inline]
pub fn format_romaji_nn(inp: &str) -> String {
    // DEAD-CODE CLEANUP: the original chained `.replace("nnn", "nn")` up to
    // `.replace("nnnnn", "nnnn")` AFTER `.replace("nn", "ん")`. Once every
    // "nn" is replaced, no two consecutive 'n's remain, so those longer
    // patterns could never match. Removing them changes no observable output.
    inp.replace("nn", "ん").replace("n'", "ん")
}

================================================ FILE: locales/de.po ================================================

# SINGULAR
# msgctxt ""
# msgid ""
# msgstr ""

# PLURAL
# msgctxt ""
# msgid ""
# msgid_plural ""
# msgstr[0] ""
# OPTIONAL: msgstr[1] ""

msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"PO-Revision-Date: 2021-11-29 21:46+0100\n"
"Last-Translator: <>\n"
"Language-Team: English\n"
"Language: en\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"

msgid "Jotoba"
msgstr ""

#### Base template
msgid "Search..."
msgstr "Suche..."
msgid "Search" msgstr "Suchen" msgid "Settings" msgstr "Einstellungen" msgid "Radicals" msgstr "Radikale" msgid "Voice" msgstr "Stimme" # Rad Help Msg msgid "This tool allows you to find Kanji by their core components (Radicals)" msgstr "Dieses Tool erlaubt es dir Kanji anhand ihrer Kernelemente (Radikale) zu finden" msgid "You can select Radicals below and add found Kanji to the search bar" msgstr "Du kannst unten gewünschte Radikale auswählen und die gefunden Kanji der Suchleiste hinzufügen" msgid "Enter" msgstr "" msgid "to start searching" msgstr "zum Starten der Suche" # Rad Btns msgid "Reset" msgstr "zurücksetzen" msgid "reset" msgstr "zurücksetzen" msgid "apply" msgstr "Annehmen" msgid "create" msgstr "erstellen" msgid "Close" msgstr "Schließen" msgid "here" msgstr "hier" msgid "Accept" msgstr "Annehmen" msgid "Decline" msgstr "Ablehnen" # Search type / Dropdown msgid "Words" msgstr "Wörter" msgid "Kanji" msgstr "Kanji" msgid "Sentences" msgstr "Sätze" msgid "Names" msgstr "Namen" # Speech overlay msgid "Current language" msgstr "Aktuelle Sprache" msgid "Currently listening" msgstr "Am Zuhören" msgid "No" msgstr "Nein" msgid "To change your language, select one of the following" msgstr "Um deine Sprache zu wechseln, wähle eins der folgenden" # Image search overlay msgid "Enter a URL or upload your image directly and Jotoba will try to search for japanese words contained in the picture." msgstr "Lade ein Bild hoch oder füge direkt die URL ein ein und Jotoba wird versuchen nach Japanischen Begriffen zu suchen, welche in dem Bild zu sehen sind." msgid "Enter Image URL..." msgstr "Bild-URL einfügen..." # Rad Picker overlay msgid "Select Radicals" msgstr "Wähle Radikale" msgid "Select Kanji" msgstr "Wähle Kanji" msgid "Search Radicals..." msgstr "Suche Radikale..." 
# Notification overlay
msgid "Notifications"
msgstr "Benachrichtigungen"

msgid "No new notifications"
msgstr "Keine Benachrichtigungen"

msgid "Show all"
msgstr "Zeige alle"

# Languages
msgid "English"
msgstr "Englisch"

msgid "German"
msgstr "Deutsch"

msgid "Russian"
msgstr "Russisch"

msgid "Spanish"
msgstr "Spanisch"

msgid "Swedish"
msgstr "Schwedisch"

msgid "French"
msgstr "Französisch"

msgid "Dutch"
msgstr "Niederländisch"

msgid "Hungarian"
msgstr "Ungarisch"

msgid "Slovenian"
msgstr "Slowenisch"

msgid "Japanese"
msgstr "Japanisch"

msgctxt "foreign_lang"
msgid "German"
msgstr "Deutschen"

msgctxt "foreign_lang"
msgid "English"
msgstr "Englischen"

msgctxt "foreign_lang"
msgid "Georgian"
msgstr "Georgischen"

msgctxt "foreign_lang"
msgid "Chinese"
msgstr "Chinesischen"

msgctxt "foreign_lang"
msgid "Manchu"
msgstr "Mandschurischen"

msgctxt "foreign_lang"
msgid "Kurdish"
msgstr "Kurdischen"

msgctxt "foreign_lang"
msgid "ChinookJargon"
msgstr "Chinook-Jargon"

msgctxt "foreign_lang"
msgid "Italian"
msgstr "Italienischen"

msgctxt "foreign_lang"
msgid "Malayalam"
msgstr "Malayalamischen"

msgctxt "foreign_lang"
msgid "Tibetian"
msgstr "Tibetischen"

msgctxt "foreign_lang"
msgid "Mongolian"
msgstr "Mongolischen"

msgctxt "foreign_lang"
msgid "Romanian"
msgstr "Rumänischen"

msgctxt "foreign_lang"
msgid "Bantu"
msgstr "Bantusprachischen"

msgctxt "foreign_lang"
msgid "Norwegian"
msgstr "Norwegischen"

msgctxt "foreign_lang"
msgid "Icelandic"
msgstr "Isländischen"

msgctxt "foreign_lang"
msgid "Breton"
msgstr "Bretonischen"

msgctxt "foreign_lang"
msgid "Maori"
msgstr "Maorischen"

msgctxt "foreign_lang"
msgid "Latin"
msgstr "Lateinischen"

msgctxt "foreign_lang"
msgid "Amharic"
msgstr "Amharischen"

msgctxt "foreign_lang"
msgid "Khmer"
msgstr "Khmerischen"

msgctxt "foreign_lang"
msgid "Swahili"
msgstr "Swahilischen"

msgctxt "foreign_lang"
msgid "Hebrew"
msgstr "Hebräischen"

msgctxt "foreign_lang"
msgid "Galician"
msgstr "Galegischen"

msgctxt "foreign_lang"
msgid "Korean"
msgstr
"Koreanischen" msgctxt "foreign_lang" msgid "Tamil" msgstr "Tamilschen" msgctxt "foreign_lang" msgid "Viatnamese" msgstr "Vietnamesischen" msgctxt "foreign_lang" msgid "Polish" msgstr "Polnischen" msgctxt "foreign_lang" msgid "Sanskrit" msgstr "Sanskrit" msgctxt "foreign_lang" msgid "Persian" msgstr "Persischen" msgctxt "foreign_lang" msgid "Filipino" msgstr "Filipinischen" msgctxt "foreign_lang" msgid "Moldavian" msgstr "Moldavischen" msgctxt "foreign_lang" msgid "Croatian" msgstr "Kroatischen" msgctxt "foreign_lang" msgid "Thai" msgstr "Thailändischen" msgctxt "foreign_lang" msgid "Burmese" msgstr "Birmanischen" msgctxt "foreign_lang" msgid "Slovak" msgstr "Slowakischen" msgctxt "foreign_lang" msgid "Czech" msgstr "Tschechischen" msgctxt "foreign_lang" msgid "Hindi" msgstr "Hindischen" msgctxt "foreign_lang" msgid "Mapudungun" msgstr "Araukaischen" msgctxt "foreign_lang" msgid "Turkish" msgstr "Türkischen" msgctxt "foreign_lang" msgid "Hawaiian" msgstr "Hawaiischen" msgctxt "foreign_lang" msgid "Afrikaans" msgstr "Afrikanischen" msgctxt "foreign_lang" msgid "Esperanto" msgstr "Esperantonischen" msgctxt "foreign_lang" msgid "Yiddish" msgstr "Jiddischen" msgctxt "foreign_lang" msgid "Somali" msgstr "Somalischen" msgctxt "foreign_lang" msgid "Tahitian" msgstr "Tahitischen" msgctxt "foreign_lang" msgid "Urdu" msgstr "Urduischen" msgctxt "foreign_lang" msgid "Indonesian" msgstr "Indonesischen" msgctxt "foreign_lang" msgid "Estonian" msgstr "Estnischen" msgctxt "foreign_lang" msgid "Bullgarian" msgstr "Bulgarischen" msgctxt "foreign_lang" msgid "Arabic" msgstr "Arabischen" msgctxt "foreign_lang" msgid "Danish" msgstr "Dänischen" msgctxt "foreign_lang" msgid "Portuguese" msgstr "Portugiesischen" msgctxt "foreign_lang" msgid "Greek" msgstr "Griechischen" msgctxt "foreign_lang" msgid "Finnish" msgstr "Finnischen" msgctxt "foreign_lang" msgid "Ainu" msgstr "Ainuischen" msgctxt "foreign_lang" msgid "Algonquian" msgstr "Algonkinischen" msgctxt "foreign_lang" msgid "French" 
msgstr "Französischen" # Settings msgid "Language" msgstr "Sprache" msgid "Display" msgstr "Design" msgid "General" msgstr "Allgemein" msgid "Default search language" msgstr "Sprache für Suchanfragen" msgid "Page language" msgstr "Sprache der Webseite" msgid "Always show english results" msgstr "Zeige englische Ergebnisse immer" msgid "Show english results on top" msgstr "Zeige englische Ergebnisse als erstes" msgid "Focus search bar on load" msgstr "Fokussiere die Suchleiste nach dem Suchen" msgid "Select input on load" msgstr "Markiere den Input nach dem Suchen" msgid "Results per page" msgstr "Ergebnisse pro Seite" msgid "Number..." msgstr "Nummer..." msgid "Input has to be in range of 1 and 100!" msgstr "Eingabe muss zwischen 1 und 100 liegen!" msgid "max amount of names/words/sentences shown per page" msgstr "Anzahl der Namen/Wörter/Sätze auf einer Seite" msgid "Show Furigana" msgstr "Zeige Furigana" msgid "Show example sentences" msgstr "Zeige Beispielsätze" msgid "Items per page" msgstr "Anzahl pro Seite" msgid "max amount of kanji shown per page" msgstr "Anzahl der Kanji auf einer Seite" msgid "Use dark mode" msgstr "Dark Mode" msgid "Show kanji-animation on load" msgstr "Zeige Kanji-Animation vollständig" msgid "Show kanji-animation numbers" msgstr "Nummeriere Zeichenreihenfolge" msgid "Default kanji animation speed" msgstr "Kanji Animationsgeschwindigkeit" msgid "Enable Quick-Copy" msgstr "\"Quick-Copy\" aktivieren" msgid "Share usage statistics" msgstr "Nutzungsstatistiken teilen" msgid "STATISTICS_EXPLANATION" msgstr "Um Jotoba zu verbessern, sammeln wir vollständig anonymisierte Daten zur Webseitennutzung. Du kannst jedoch jederzeit aus der Sammlung aussteigen." # Cookie text msgid "To use this feature you have to accept to the use of cookies." msgstr "Um dieses Feature nutzen zu können, musst du den Cookies zustimmen." msgid "Your data will only be used for your personal website settings." 
msgstr "Deine Daten werden ausschließlich für deine persönlichen Website Einstellungen verwendet."

# Prefix of cookie revoke text
msgid "Click"
msgstr "Klicke"

# Suffix of Cookie revoke text
msgid "to revoke your Cookies agreement"
msgstr "um deine Cookie Zustimmung zurückzunehmen"

# Suffix of Cookie agree text
msgid "to enable Cookies"
msgstr "um Cookies zu aktivieren"

# Footer
msgid "Jotoba wouldn't be able to exist without the help of many open-source data sources."
msgstr "Jotoba würde ohne die Hilfe von vielen Open-Source Quellen nicht existieren."

msgid "About Page"
msgstr "\"Über Uns\" - Seite"

# Cookie Footer
msgid "We use cookies to improve your experience and deliver personalized content."
msgstr "Wir benutzen Cookies, um Jotoba zu verbessern und Deinen Inhalt zu personalisieren."

msgid "By using Jotoba you agree to our"
msgstr "Indem du Jotoba benutzt, stimmst du unserer"

msgid "privacy policy"
msgstr "Datenschutzerklärung"

msgid "."
msgstr "zu."

msgid "Only use necessary"
msgstr "Nur notwendige verwenden"

msgid "Allow Cookies"
msgstr "alle Cookies erlauben"

### About Page
msgid "is a multilingual Japanese dictionary"
msgstr "ist ein mehrsprachiges Japanisch-Wörterbuch"

msgid "It is easy to find translations for words or kanji, see example sentences and the way names can be written."
msgstr "Es ist einfach Übersetzungen für Wörter oder Kanji, aber auch Beispielsätze und Namen zu finden."
msgid "Here are some examples on how to use this page" msgstr "Hier sind einige Beispiele, wie man die Webseite benutzen kann" msgid "Quickly change the search type by pressing" msgstr "Ändere deinen Suchtypen mit diesen Tasten:" msgid "You can specify your search by typing" msgstr "Suche nach spezifischen Ergebnissen mit:" msgid "You can find verbs that are conjugated" msgstr "Du kannst Konjugierte Verben suchen" msgid "You can search multiple kanji at once" msgstr "Suche mehrere Kanji auf einmal" msgid "is open source" msgstr "ist Open Source" msgid "Check out our" msgstr "Besuche unsere" msgid "Check out the" msgstr "Besuche unsere" msgid "aswell if you are interested in upcoming features and what we are currently working on" msgstr ", wenn du an zukünftigen Features interessiert bist oder dich interessiert an welchen Features wir aktuell arbeiten" msgid "for a list of all contributors in this project." msgstr "für eine Liste aller Mitwirkenden an diesem Projekt." msgctxt "index" msgid "or" msgstr "oder" msgid "Press" msgstr "Drücke" msgid "to instantly focus the search bar" msgstr "um die Suchleiste zu fokussieren" ### Info / Help Page msgid "Shortcuts" msgstr "Tastenkombinationen" msgid "To improve the quality of life on Jotoba, we offer some shortcuts to quickly navigate the page:" msgstr "Um die Nutzung von Jotoba so einfach wie möglich zu machen, bieten wir einige Shortcuts an mit denen man schnell auf der Seite navigieren kann:" msgid "Everywhere" msgstr "Überall verfügbar" msgid "Quickly change between words | sentences | names | kanji tabs" msgstr "Wechsle schnell zwischen Wörtern | Sätzen | Namen | Kanji Suchen" msgid "Focus the search bar" msgstr "Fokussiere die Suchleiste" msgid "Focussed search bar" msgstr "Wenn die Suchleiste im Fokus liegt" msgid "Iterate suggestions up | down" msgstr "Iteriere Vorschläge hoch | herunter" msgid "Iterate suggestions down" msgstr "Iteriere Vorschläge herunter" msgid "[Words] search" msgstr "[Wörter] Suche" msgid 
"Play the first possible audio" msgstr "Spiele die Audio vom ersten Ergebnis ab" msgid "[Kanji] search" msgstr "[Kanji] Suche" msgid "Show / Collapse compounds" msgstr "Zeige / Verstecke Wortverbindungen" msgid "To specify what kind of results your search should offer, you can use shortcuts." msgstr "Um deine Suche genauer zu spezifizieren, können Hashtags verwendet werden." msgid "Hashtags should be written at end end of your input like this:" msgstr "Sie sollten folgendermaßen an das Ende eines Inputs geschrieben werden:" msgid "Available Hashtags for [Words] search" msgstr "Verfügbare Hashtags für die [Wörter] Suche" msgid "Search for nouns" msgstr "Suche nach Nomen" msgid "Search for verbs" msgstr "Suche nach Verben" msgid "Search for transitive verbs" msgstr "Suche nach transitiven Verben" msgid "Search for intransitive verbs" msgstr "Suche nach intransitiven Verben" msgid "Search for adverb" msgstr "Suche nach Adverben" msgid "Search for auxilary verbs" msgstr "Suche nach Hilfsverben" msgid "Search for adjectives" msgstr "Suche nach Adjektiven" msgid "Search for pronouns" msgstr "Suche nach Pronomen" msgid "Search for conjugations" msgstr "Suche nach Konjugationen" msgid "Search for prefixes" msgstr "Suche nach Prefixen" msgid "Search for suffixes" msgstr "Suche nach Suffixen" msgid "Search for japanese particles" msgstr "Suche nach Japanischen Partikeln [z.B. を]" msgid "Lists iru/eru ending verbs which are conjugated as godan verbs" msgstr "Zeige iru/eru endende Verben, die als Godan verb konjugiert werden" msgid "Search for sfx words [comic sounds]" msgstr "Suche nach sfx-Wörtern [z.b. 
Sounds in Comics]" msgid "Search for words used for counting" msgstr "Suche nach Wörtern zum Zählen" msgid "Search for expressions" msgstr "Suche nach Ausdrücken" msgid "Search for words used as interjections" msgstr "Suche nach Wörtern, welche als Interjektionen genutzt werden" msgid "Search for numeric words" msgstr "Suche nach numerischen Wörtern" msgid "Search for abbreviations" msgstr "Suche nach Abkürzungen" msgid "Search for words that don't fit in any category" msgstr "Suche nach Wörtern, welche in keine andere Kategorie passen" msgid "Search for words included in the specific JLPT level" msgstr "Suche nach Wörtern aus dem jeweiligen JLPT Level" msgid "Search in the [words] category" msgstr "Suche in der [Wörter] Kategorie" msgid "Search in the [sentences] category" msgstr "Suche in der [Sätze] Kategorie" msgid "Search in the [name] category" msgstr "Suche in der [Namen] Kategorie" msgid "Search in the [kanji] category" msgstr "Suche in der [Kanji] Kategorie" msgid "Available Hashtags for [Sentence] search" msgstr "Verfügbare Hashtags für die [Sätze] Suche" msgid "Search for sentences included in the specific JLPT level" msgstr "Suche nach Sätzen aus dem jeweiligen JLPT Level" msgid "Hide translations by default to translate them yourself and check if its correct" msgstr "Zeige Übersetzungen eingeklappt an, damit du die Sätze selber übersetzen und gegenprüfen kannst" msgid "Available Hashtags for [Kanji] search" msgstr "Verfügbare Hashtags für die [Kanji] Suche" msgid "Search for kanji included in the specific Genki chapter" msgstr "Suche nach Kanji aus dem jeweiligen Genki Kapitel" msgid "Radical search" msgstr "Radical suche" msgid "The radical picker allows searching for radicals to make the process of picking radicals even faster. The supported inputs are as following:" msgstr "Der Radikalsucher ermöglicht eine Suche innerhalb der Radikalen um eine noch schnellere Kanji Inputmethode zur Verfügung zu stellen. 
Die unterstützten Input-typen sind wie folgt:" msgid "Results in all radicals used to build given kanji characters" msgstr "Gibt alle Radikale an, die benutzt werden um die gesuchten kanji zusammenezustellen" msgid "Searches in words for the given query and returns in result-matching radicals" msgstr "Führt eine Wortsuche durch und gibt alle Radikale an, die in den Ergebnissen benutzt werden" msgid "Tries to find the given query in radicals names, otherwise does a word search and returns the result's kanji" msgstr "Versucht Radikale bei ihren Namen zu finden. Gelingt dies nicht, wird eine Wortsuche durchgeführt" ## Name search msgid "Full name" msgstr "Vollständer Name" msgid "Sex" msgstr "Geschlecht" msgid "Name origin" msgstr "Namensherkunft" ## Kanji results msgid "Part" msgid_plural "Parts" msgstr[0] "Bestandteil" msgstr[1] "Bestandteile" # strokes suffix msgid "{} stroke" msgid_plural "{} strokes" msgstr[0] "{} Strich" msgstr[1] "{} Striche" msgid "Decomposition" msgstr "Zusammensetzung" msgid "Radical" msgstr "Radikal" msgid "Kun" msgstr "" msgid "On" msgstr "" msgid "On reading compounds" msgstr "On - Zusammensetzungen" msgid "Kun reading compounds" msgstr "Kun - Zusammensetzungen" msgid "JLPT level" msgstr "JLPT Level" msgid "of 2500 most used kanji in newspapers" msgstr "der 2500 meist genutzten Kanji in Zeitungen" msgid "Similar Kanji" msgstr "Ähnliche Kanji" msgid "Chinese reading" msgstr "Chinesische Lesungen" msgid "Korean reading" msgstr "Koreanische Lesungen" msgid "Vietnamese reading" msgstr "Vietnamesische Lesungen" msgid "Japanese names" msgstr "Japanische Namen" ## Word results msgid "Words and kanji" msgstr "Wörter und Kanjis" msgid "{} could be an inflection of {}, with this form:" msgid_plural "{} könnte eine Flexion sein von {}, mit diesen Formen:" msgstr[0] "{} könnte eine Flexion sein von {}, mit dieser Form:" msgstr[1] "{} könnte eine Flexion sein von {}, mit diesen Formen:" msgid "Temporarily switched language to {}" msgstr "Vorübergehend 
zu {} gewechselt" msgctxt "inflection" msgid "Negative" msgstr "Negativ" msgctxt "inflection" msgid "Polite" msgstr "Höflichkeit" msgctxt "inflection" msgid "Present" msgstr "Gegenwart" msgctxt "inflection" msgid "Past" msgstr "Vergangenheit" msgctxt "inflection" msgid "TeForm" msgstr "Te-Form" msgctxt "inflection" msgid "Potential" msgstr "Möglichkeit" msgctxt "inflection" msgid "Passive" msgstr "Passiv" msgctxt "inflection" msgid "Causative" msgstr "Kausativ" msgctxt "inflection" msgid "PotentialOrPassive" msgstr "Potential or Passiv" msgctxt "inflection" msgid "Imperative" msgstr "Imperativ" msgctxt "inflection" msgid "Tai" msgstr "たい (Gibt an etwas tun zu wollen)" msgctxt "inflection" msgid "TeIru" msgstr "ている (Gibt eine andauernde Aktion an)" msgctxt "inflection" msgid "TeAru" msgstr "てある (Bedeutet etwas wurde getan)" msgctxt "inflection" msgid "TeMiru" msgstr "てみる (Bedeutet etwas zu \"versuchen\")" msgctxt "inflection" msgid "Tara" msgstr "たら (Gibt eine Bedingung an)" msgid "Taught in {} grade" msgstr "Beigebracht in der {}. 
Klasse" msgid ", with this form:" msgid_plural ", with these forms:" msgstr[0] ", mit dieser Form:" msgstr[1] ", mit diesen Formen:" msgid "Show Conjugations" msgstr "Zeige Konjugationen" msgid "Show collocation" msgid_plural "Show collocations" msgstr[0] "Zeige Kollokation" msgstr[1] "Zeige Kollokationen" msgid "Collocations" msgstr "Kollokationen" msgid "Conjugations" msgstr "Konjugationen" msgid "Antonym of {}" msgstr "Antonym von {}" msgid "See also {}" msgstr "Siehe auch {}" msgid "Pitch accent" msgstr "Tonhöhenakzent" msgid "Other forms" msgstr "Andere Formen" msgid "Affirmative" msgstr "Positiv" msgid "Negative" msgstr "Negativ" msgid "Present" msgstr "Gegenwart" msgid "Present, polite" msgstr "Gegenwart, höflich" msgid "Past" msgstr "Vergangenheit" msgid "Past, polite" msgstr "Vergangenheit, höflich" msgid "Te-form" msgstr "Te-Form" msgid "Potential" msgstr "Potenzielle" msgid "Passive" msgstr "Passiv" msgid "Causative" msgstr "Kausativ" msgid "Causative Passive" msgstr "Kausativ-Passiv" msgid "Imperative" msgstr "Imperativ" msgid "Play audio" msgstr "Vorlesen" msgid "common word" msgstr "häufiges Wort" msgid "JLPT N{}" msgstr "" msgid "Sentence search" msgstr "Beispielsätze" msgid "Download audio" msgstr "Audio herunterladen" msgid "Direct reference" msgstr "Einzelnachweis" # "no words found" msgid "words" msgstr "Wörter" # gairaigo msgid "From {}: {}" msgstr "Vom {}: {}" ## Sentence search msgid "hide" msgstr "verstecken" msgid "show" msgstr "zeigen" # "No sentences found" msgid "sentences" msgstr "Sätze" ## About page # Title 1 msgid "About" msgstr "Über Jotoba" msgid "Jotoba is a multilingual Japanese dictionary. It is easy to find translations for words or kanji, see example sentences and the way names can be written." msgstr "Jotoba ist ein mehrsprachiges Japanisch Wörterbuch. Es ist einfach Übersetzungen für Wörter oder Kanji zu finden oder zu sehen, wie Beispielsätze und Namen geschrieben werden." msgid "Jotoba is open source. 
Check out our"
msgstr "Jotoba ist Open Source. Besuche unsere"

msgid "Github page"
msgstr "Github Seite"

msgid "if you want to contribute or host Jotoba yourself."
msgstr ", wenn du Jotoba helfen willst zu wachsen oder auch selbst hosten möchtest."

msgid "Trello Board"
msgstr "Trello Roadmap"

msgid "aswell if you are interested in upcoming features and what we are currently working on!"
msgstr ", wenn du an zukünftigen Features oder unserer aktuellen Arbeit interessiert bist!"

# Title 2
msgid "Data Sources and Inspiration"
msgstr "Quellen und Inspirationen"

msgid "Of course this project wouldn't have been possible without the help of some great data sources."
msgstr "Jotoba wäre ohne die Hilfe von vielen großartigen und kostenlosen Projekten niemals möglich gewesen."

msgid "Many thanks to every one of them for providing such a variety of data people can use to learn the japanese language."
msgstr "Vielen Dank an jeden, der daran beteiligt war diese große Vielfalt an Daten für Japanisch Lernende zu erschaffen."

# Source
msgid "Jisho"
msgstr ""

msgid "Joto-kun"
msgstr ""

msgid "Joto-kun was created by a good friend of ours who is truly a wizard when it comes down to design!"
msgstr "Joto-kun wurde von einer guten Freundin erstellt, welche eine wahre Zauberin ist, wenn es ums Designen geht!"

msgid "Jisho, created by Kim Ahlström, Miwa Ahlström and Andrew Plummer is a pretty and powerful english-japanese dictionary."
msgstr "Jisho, welches von Kim Ahlström, Miwa Ahlström und Andrew Plummer erstellt wurde, ist ein großartiges Englisch-Japanisch Wörterbuch."

msgid "We took inspiration in their work and design to improve on their concept and offer it to a wider variety of people."
msgstr "Wir haben uns von ihrer Arbeit inspirieren lassen um ihr Konzept und Design weiterzuentwickeln und so vielen Menschen wie möglich zur Verfügung stellen zu können."
# Source msgid "Words (except sound effects), Kanji and Names available on this site are publicly provided and maintained by" msgstr "Wörter (außer Sound Effekten), Kanji und Namen auf unserer Seite sind öffentlich bereitgestellt und fallen unter die" msgid "and available under the license" msgstr "und verfügbar unter der Lizenz" msgid "Additionally, the RADKFILE by Jin Breen is used to link Radicals to Kanji." msgstr "Außerdem wird die RADKFILE von JinBreen verwendet, welche Radikale und Kanji verlinkt." # Source msgid "Audio Files" msgstr "Audio Dateien" msgid "The audio files #1 were graciously made public by" msgstr "Die Audio Dateien #1 wurden netterweise von" msgid "WaniKani" msgstr "WaniKani" msgid "and" msgstr "und" msgid "Tofugo" msgstr "Tofugo" msgid "and uploaded to Github under the CC-BY-4.0 licence." msgstr "öffentlich gemacht und auf Github unter der CC-BY-4.0 Lizenz veröffentlicht." msgid "The audio files #2 are provided by the" msgstr "Die Audio dateien #2 wurden vom" msgid "Kanji alive project" msgstr "Kanji alive Projekt" msgid "and are also available under the CC-BY-4.0 license." msgstr "veröffentlicht und stehen auch unter der CC-BY-4.0 Lizenz." msgid "Manga Sound Effects" msgstr "Manga Sound Effekte" msgid "The data about Sound Effects is graciously provided by Chris Kincaid and is used as additional data in the word search." msgstr "Die Daten der Sound Effekte wurden uns netterweise von Chris Kincaid zur Verfügung gestellt und können über die Wörtersuche gefunden werden." # Source msgid "Sentences are provided by Tatoeba under the Creative Commons CC 1.0 and 2.0 licences. " msgstr "Sätze sind von Tatoeba unter den Creative Commons CC 1.0 und 2.0 Lizenzen bereitgestellt." # Source msgid "Kanji Animations" msgstr "Kanji Animationen" msgid "The raw data used for kanji animations is publicly provided by KanjiVG, a project by Ulrich Apel." 
msgstr "Die Rohdaten der Kanji Animationen wurden von Ulrich Apel und seinem Projekt 'KanjiVG' bereitgestellt."

msgid "The conversion into images and animated SVG is done by a ruby script which was made by"
msgstr "Die Konvertierung der Bilder und animierten SVG wurde durch ein Ruby Skript von"

msgid "Kimtaro"
msgstr ""

msgid "and altered by"
msgstr "erschaffen, welches von"

msgid "Yukáru"
msgstr "Yukáru speziell für Jotoba angepasst wurde."

# Source
msgid "JLPT Data"
msgstr "JLPT Daten"

msgid "Data about JLPT proficiencies are by provided by Jonathan Waller."
msgstr "Die Informationen über die JLPT Fähigkeitslevel sind von Jonathan Waller zur Verfügung gestellt worden."

msgid "There is also some non-free data available on his website, so check it out if you are interested."
msgstr "Auf seiner Webseite gibt es noch weitere, nicht kostenlose Daten, falls du dich für seine Arbeit interessierst."

# Source
msgid "Word tokenization"
msgstr "Wort-Tokenisierung"

msgid "Word tokenization is done using UniDic, by the UniDic Consortium and used for Japanese morphological analysis implementations."
msgstr "Die Wort-Tokenisierung erfolgt mit UniDic vom UniDic Consortium, das für japanische morphologische Analysen verwendet wird."

# Source
msgid "Pitch accents"
msgstr "Pitch-Akzente"

msgid "Data about Radicals used in specific Kanji are provided by Kanjium."
msgstr "Diese Daten wurden vom Kanjium Projekt zur Verfügung gestellt."

msgid "On the project's Github Page you can find lots of data about Kanji."
msgstr "Auf ihrer Github Seite findet man viele verschiedene Infos über Kanji."

msgid "Pitch accent data has been extracted from UniDic."
msgstr "Pitch-Akzent Daten wurden aus dem UniDic extrahiert."
## Jmdict # Dialect(s) msgid "{} dialect" msgstr "{} Dialekt" # Information msgid "ateji" msgstr "" msgid "irregular kana" msgstr "Unregelmäßiges Kana" msgid "irregular kanji" msgstr "Unregelmäßiges Kanji" msgid "irregular okurigana" msgstr "Unregelmäßiges Okurigana" msgid "outdated kana" msgstr "veraltetes Kana" msgid "outdated kanji" msgstr "veraltetes Kanji" msgid "gikun" msgstr "" msgid "usually written in kana" msgstr "Normalerweise geschrieben in Kana" msgid "rarely used kanji form" msgstr "Selten verwendete Kanjiform" # Misc msgid "Abbreviation" msgstr "Abkürzung" msgid "Archaism" msgstr "Archaismus" msgid "Character" msgstr "Charakter" msgid "Childrens language" msgstr "Kindersprache" msgid "Colloquialism" msgstr "Umgangssprache" msgid "Company name" msgstr "Firmenname" msgid "Creature" msgstr "Kreatur" msgid "Dated term" msgstr "Datierter Begriff" msgid "Deity" msgstr "Gottheit" msgid "Derogatory" msgstr "Abwertend" msgid "Document" msgstr "Dokument" msgid "Event" msgstr "Event" msgid "Familiar language" msgstr "Vertraute Sprache" msgid "Female term/language" msgstr "Weiblicher Begriff/Sprache" msgid "Fiction" msgstr "Fiktion" msgid "Given name" msgstr "Vorname" msgid "Group" msgstr "Gruppe" msgid "Historical term" msgstr "Historischer Begriff" msgid "Honorific language" msgstr "Höfliche Sprache" msgid "Humble language" msgstr "Bescheidene Sprache" msgid "Idiomatic expression" msgstr "Idiomatischer Ausdruck" msgid "Jocular humorous term" msgstr "Scherzhafter humoristischer Begriff" msgid "Legend" msgstr "Legende" msgid "Literary/formal term" msgstr "Literarischer/formeller Begriff" msgid "Manga slang" msgstr "Manga-Slang" msgid "Male term/language" msgstr "Männlicher Begriff/Sprache" msgid "Mythology" msgstr "Mythologie" msgid "Internet slang" msgstr "Internet-Slang" msgid "Object" msgstr "Objekt" msgid "Obsolete term" msgstr "Veralteter Begriff" msgid "Obscure term" msgstr "Unbekannter Begriff" msgid "Onomatopoetic or mimetic word" msgstr 
"Onomatopoetisches oder mimetisches Wort" msgid "Organization name" msgstr "Organisationsname" msgid "Other" msgstr "Andere" msgid "Person name" msgstr "Personenname" msgid "Place name" msgstr "Ortsname" msgid "Poetical term" msgstr "Poetischer Begriff" msgid "Polite language" msgstr "Höfliche Sprache" msgid "Product name" msgstr "Produktname" msgid "Proverb" msgstr "Sprichwort" msgid "Qutation" msgstr "Zitat" msgid "Rare" msgstr "Selten" msgid "Religion" msgstr "" msgid "Sensitive" msgstr "Sensibel" msgid "Service" msgstr "" msgid "Slang" msgstr "" msgid "Railway station" msgstr "Bahnhof" msgid "Family or surname" msgstr "Familien- oder Nachname" msgid "Usually written in kana" msgstr "Üblicherweise in Kana geschrieben" msgid "Unclassified name" msgstr "Nicht klassifizierter Name" msgid "Vulgar expression/word" msgstr "Vulgärer Ausdruck/Wort" msgid "Artwork" msgstr "Kunstwerk" msgid "Rude/x-rated term" msgstr "Nicht Jugendfreier Begriff" msgid "Yojijukugo" msgstr "" # Fields msgid "{} term" msgstr "{} Begriff" # The following words will be inserted in the brackets above. Check the Syntax. 
msgid "Agriculture" msgstr "Landwirtschaft" msgid "Anatomy" msgstr "Anatomie" msgid "Archeology" msgstr "Archäologie" msgid "Architecture" msgstr "Architektur" msgid "Art aesthetics" msgstr "Kunstästhetik" msgid "Astronomy" msgstr "Astronomie" msgid "Audio/visual" msgstr "Audio/visuell" msgid "Aviation" msgstr "Luftfahrt" msgid "Baseball" msgstr "" msgid "Biochemistry" msgstr "Biochemie" msgid "Biology" msgstr "Biologie" msgid "Botany" msgstr "Botanik" msgid "Buddhism" msgstr "Buddhismus" msgid "Business" msgstr "" msgid "Chemistry" msgstr "Chemie" msgid "Christianity" msgstr "Christentum" msgid "Computing" msgstr "Computer" msgid "Clothing" msgstr "Kleidung" msgid "Crystallography" msgstr "Kristallographie" msgid "Ecology" msgstr "Ökologie" msgid "Economics" msgstr "Wirtschaft" msgid "Electricity" msgstr "Elektrizität" msgid "Electronics" msgstr "Elektronik" msgid "Embryology" msgstr "Embryologie" msgid "Engineering" msgstr "Ingenieurwesen" msgid "Entomology" msgstr "Entomologie" msgid "Finance" msgstr "Finanzen" msgid "Fishing" msgstr "Fischen" msgid "FoodCooking" msgstr "Essen kochen" msgid "Gardening" msgstr "Gärtnerei" msgid "Genetics" msgstr "Genetik" msgid "Geography" msgstr "Geografie" msgid "Geology" msgstr "Geologie" msgid "Geometry" msgstr "Geometrie" msgid "Go (game)" msgstr "" msgid "Golf" msgstr "" msgid "Grammar" msgstr "Grammatik" msgid "Greek mythology" msgstr "Griechische Mythologie" msgid "Hanafuda" msgstr "" msgid "Horseracing" msgstr "Pferderennen" msgid "Law" msgstr "Recht" msgid "Linguistics" msgstr "Linguistik" msgid "Logic" msgstr "Logik" msgid "Martial arts" msgstr "" msgid "Mahjong" msgstr "" msgid "Mathematics" msgstr "Mathematik" msgid "MechanicalEngineering" msgstr "Maschinenbau" msgid "Medicine" msgstr "Medizin" msgid "Climate/weather" msgstr "Klima/Wetter" msgid "Military" msgstr "Militär" msgid "Music" msgstr "Musik" msgid "Ornithology" msgstr "Vogelkunde" msgid "Paleontology" msgstr "Paläontologie" msgid "Pathology" msgstr 
"Pathologie" msgid "Pharmacy" msgstr "Pharmazie" msgid "Philosophy" msgstr "Philosophie" msgid "Photography" msgstr "Fotografie" msgid "Physics" msgstr "Physik" msgid "Physiology" msgstr "Physiologie" msgid "Printing" msgstr "" msgid "Psychology" msgstr "Psychologie" msgid "Psychiatry" msgstr "Psychiatrie" msgid "Railway" msgstr "Eisenbahn" msgid "Shinto" msgstr "" msgid "Shogi" msgstr "" msgid "Sports" msgstr "Sport" msgid "Statistics" msgstr "Statistiken" msgid "Sumo" msgstr "" msgid "Telecommunications" msgstr "Telekommunikation" msgid "Trademark" msgstr "" msgid "Videogame" msgstr "Videospiel" msgid "Zoology" msgstr "Zoologie" # Part of speech msgid "Godan verb" msgstr "Godan Verb" msgid "Irregular verb with {} ending" msgstr "Unregelmäßiges Verb mit {} Endung" msgid "SoundFx" msgstr "" msgid "Expression" msgstr "Ausdruck" msgid "Counter" msgstr "Zählwort" msgid "Suffix" msgstr "" msgid "Prefix" msgstr "Präfix" msgid "Particle" msgstr "Partikel" msgid "Interjection" msgstr "Ausruf" msgid "Symbol" msgstr "" msgid "Pronoun" msgstr "Pronomen" msgid "Auxilary" msgstr "" msgid "Numeric" msgstr "Nummer" msgid "Adverb-To" msgstr "To-Adverb" msgid "Adverb" msgstr "Adverb" msgid "Adjective" msgstr "Adjektiv" msgid "Auxilary adjective" msgstr "Hilfsadjektiv" msgid "Auxilary Verb" msgstr "Hilfsverb" msgid "Verb" msgstr "Verb" msgid "Conjugation" msgstr "Konjugation" msgid "Unclassified" msgstr "nicht klassifiziert" msgid "Noun or verb describing a noun" msgstr "Nomen beschreibendes Nomen oder Verb" msgid "I adjective" msgstr "I Adjektiv" msgid "I adjective (conjugated like いい)" msgstr "I Adjektiv (konjugiert wie いい)" msgid "Ku adjective" msgstr "Ku Adjektiv" msgid "Na adjective" msgstr "Na Adjektiv" msgid "Formal form of na adjective" msgstr "Formelle Form eines na-Adjektives" msgid "No adjective" msgstr "No-Adjektiv" msgid "Pre noun adjective" msgstr "Pre-Nomen Adjektiv" msgid "Shiku adjective" msgstr "Shiku Adjektiv" msgid "Taru adjective" msgstr "Taru Adjektiv" msgid 
"Noun" msgstr "Nomen" msgid "Noun adverbial" msgstr "adverbiales Nomen" msgid "Prefix (noun)" msgstr "Präfix (Nomen)" msgid "Suffix (noun)" msgstr "Suffix (Nomen)" msgid "Temporal noun" msgstr "zeitliches Nomen" msgid "Unspecified verb" msgstr "Unspezifiziertes Verb" msgid "Intransitive verb" msgstr "Intransitives Verb" msgid "Transitive verb" msgstr "Transitives Verb" msgid "Ichidan verb" msgstr "Ichidan Verb" msgid "Ichidan zuru verb" msgstr "Ichidan zuru Verb" msgid "Ichidan kureru verb" msgstr "Ichidan kureru Verb" msgid "Kuru verb" msgstr "Kuru Verb" msgid "Noun taking suru" msgstr "" msgid "Suru verb" msgstr "Suru Verb" msgid "Suru special" msgstr "Suru verb, Spezialfall" msgid "Pre-noun" msgstr "" # this thingy -> " " msgid "Space" msgstr "Leerzeichen" ## Name search msgctxt "name_type" msgid "Company" msgstr "Firma" msgctxt "name_type" msgid "Female" msgstr "Weiblich" msgctxt "name_type" msgid "Male" msgstr "Männlich" msgctxt "name_type" msgid "Organization" msgstr "Organisation" msgctxt "name_type" msgid "Persons name" msgstr "Personenname" msgctxt "name_type" msgid "Place" msgstr "Platz" msgctxt "name_type" msgid "Product" msgstr "Produkt" msgctxt "name_type" msgid "(Railway)Station" msgstr "Haltestelle" msgctxt "name_type" msgid "Surname" msgstr "Familienname" msgctxt "name_type" msgid "Unknown" msgstr "unbekannt" msgctxt "name_type" msgid "Art work" msgstr "Kunstwerk" msgctxt "name_type" msgid "Character" msgstr "Charakter" msgctxt "name_type" msgid "Deity" msgstr "Göttliches Wesen" msgctxt "name_type" msgid "Document" msgstr "Dokument" msgctxt "name_type" msgid "Event" msgstr "" msgctxt "name_type" msgid "Fiction" msgstr "Fiktion" msgctxt "name_type" msgid "Group" msgstr "Gruppe" msgctxt "name_type" msgid "Legend" msgstr "Legende" msgctxt "name_type" msgid "Mythology" msgstr "Mythologie" msgctxt "name_type" msgid "Object" msgstr "Objekt" msgctxt "name_type" msgid "Other" msgstr "Andere" msgctxt "name_type" msgid "Religion" msgstr "" msgctxt "name_type" 
msgid "Service" msgstr "" # "No names found" msgid "names" msgstr "Namen" ## Search help msgid "Search Help" msgstr "Suchhilfe" msgid "No {} found" msgstr "Keine {} gefunden" msgid "Your default search language might not fit your input" msgstr "Deine eingestellte Sprache und die in der gesuchten Sprache stimmen nicht überein" msgid "Check your search for typos" msgstr "Überprüfe deine Suche nach Tippfehlern" msgid "Use more generic search terms" msgstr "Benutze allgemeinere Begriffe" msgid "Try finding your search in a different category using" msgstr "Versuche in einer anderen Kategorie zu suchen, mithilfe " msgid "Your search request might not be included in our database yet" msgstr "Deine Suchanfrage ist (noch) nicht in unserer Datenbank" msgid "If you think your search should be contained in our database, submit an issue on" msgstr "Wenn du der Meinung bist, diese Suchanfrage sei in unserer Datenbank, erstelle gerne ein Issue auf" # Also check our Trello board since we might be working on it msgid "Also check our" msgstr "Außerdem besuche unser" msgid "since we might be working on it!" msgstr ", denn es kann sein, dass wir bereits daran arbeiten!" # Paginator msgid "First" msgstr "Erste" msgid "Last" msgstr "Letzte" ================================================ FILE: locales/en.po ================================================ # SINGULAR # msgctxt "" # msgid "" # msgstr "" # PLURAL # msgctxt "" # msgid "" # msgid_plural "" # msgstr[0] "" # OPTIONAL: msgstr[1] "" msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "PO-Revision-Date: 2021-12-12 16:30+0100\n" "Last-Translator: \n" "Language-Team: English\n" "Language: en\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" msgid "Jotoba" msgstr "" #### Base template msgid "Search..." 
msgstr "" msgid "Search" msgstr "" msgid "Settings" msgstr "" msgid "Radicals" msgstr "" msgid "Voice" msgstr "" # Rad Help Msg msgid "This tool allows you to find Kanji by their core components (Radicals)" msgstr "" msgid "You can select Radicals below and add found Kanji to the search bar" msgstr "" msgid "Enter" msgstr "" msgid "to start searching" msgstr "" # Rad Btns msgid "Reset" msgstr "" msgid "reset" msgstr "" msgid "apply" msgstr "" msgid "create" msgstr "" msgid "here" msgstr "" msgid "Accept" msgstr "" msgid "Decline" msgstr "" # Search type / Dropdown msgid "Words" msgstr "" msgid "Kanji" msgstr "" msgid "Sentences" msgstr "" msgid "Names" msgstr "" # Speech overlay msgid "Current language" msgstr "" msgid "Currently listening" msgstr "" msgid "No" msgstr "" msgid "To change your language, select one of the following" msgstr "" # Image search overlay msgid "Enter a URL or upload your image directly and Jotoba will try to search for japanese words contained in the picture." msgstr "" msgid "Enter Image URL..." msgstr "" # Rad Picker overlay msgid "Select Radicals" msgstr "" msgid "Select Kanji" msgstr "" msgid "Search Radicals..." 
msgstr "" # Notification overlay msgid "Notifications" msgstr "" msgid "No new notifications" msgstr "" msgid "Show all" msgstr "" msgid "Close" msgstr "" # Languages msgid "English" msgstr "" msgid "German" msgstr "" msgid "Russian" msgstr "" msgid "Spanish" msgstr "" msgid "Swedish" msgstr "" msgid "French" msgstr "" msgid "Dutch" msgstr "" msgid "Hungarian" msgstr "" msgid "Slovenian" msgstr "" msgid "Japanese" msgstr "" msgctxt "foreign_lang" msgid "German" msgstr "" msgctxt "foreign_lang" msgid "English" msgstr "" msgctxt "foreign_lang" msgid "Georgian" msgstr "Georgian" msgctxt "foreign_lang" msgid "Chinese" msgstr "Chinese" msgctxt "foreign_lang" msgid "Manchu" msgstr "Manchu" msgctxt "foreign_lang" msgid "Kurdish" msgstr "Kurdish" msgctxt "foreign_lang" msgid "ChinookJargon" msgstr "Chinook jargon" msgctxt "foreign_lang" msgid "Italian" msgstr "Italian" msgctxt "foreign_lang" msgid "Malayalam" msgstr "Malayalam" msgctxt "foreign_lang" msgid "Tibetian" msgstr "Tibetian" msgctxt "foreign_lang" msgid "Mongolian" msgstr "Mongolian" msgctxt "foreign_lang" msgid "Romanian" msgstr "Romanian" msgctxt "foreign_lang" msgid "Bantu" msgstr "Bantu" msgctxt "foreign_lang" msgid "Norwegian" msgstr "Norwegian" msgctxt "foreign_lang" msgid "Icelandic" msgstr "Icelandic" msgctxt "foreign_lang" msgid "Breton" msgstr "Breton" msgctxt "foreign_lang" msgid "Maori" msgstr "Maori" msgctxt "foreign_lang" msgid "Latin" msgstr "Latin" msgctxt "foreign_lang" msgid "Amharic" msgstr "Amharic" msgctxt "foreign_lang" msgid "Khmer" msgstr "Khmer" msgctxt "foreign_lang" msgid "Swahili" msgstr "Swahili" msgctxt "foreign_lang" msgid "Hebrew" msgstr "Hebrew" msgctxt "foreign_lang" msgid "Galician" msgstr "Galician" msgctxt "foreign_lang" msgid "Korean" msgstr "Korean" msgctxt "foreign_lang" msgid "Tamil" msgstr "Tamil" msgctxt "foreign_lang" msgid "Viatnamese" msgstr "Viatnamese" msgctxt "foreign_lang" msgid "Polish" msgstr "Polish" msgctxt "foreign_lang" msgid "Sanskrit" msgstr "Sanskrit" 
msgctxt "foreign_lang" msgid "Persian" msgstr "Persian" msgctxt "foreign_lang" msgid "Filipino" msgstr "Filipino" msgctxt "foreign_lang" msgid "Moldavian" msgstr "Moldavian" msgctxt "foreign_lang" msgid "Croatian" msgstr "Croatian" msgctxt "foreign_lang" msgid "Thai" msgstr "Thai" msgctxt "foreign_lang" msgid "Burmese" msgstr "Burmese" msgctxt "foreign_lang" msgid "Slovak" msgstr "Slovak" msgctxt "foreign_lang" msgid "Czech" msgstr "Czech" msgctxt "foreign_lang" msgid "Hindi" msgstr "Hindi" msgctxt "foreign_lang" msgid "Mapudungun" msgstr "Mapudungun" msgctxt "foreign_lang" msgid "Turkish" msgstr "Turkish" msgctxt "foreign_lang" msgid "Hawaiian" msgstr "Hawaiian" msgctxt "foreign_lang" msgid "Afrikaans" msgstr "Afrikaans" msgctxt "foreign_lang" msgid "Esperanto" msgstr "Esperanto" msgctxt "foreign_lang" msgid "Yiddish" msgstr "Yiddish" msgctxt "foreign_lang" msgid "Somali" msgstr "Somali" msgctxt "foreign_lang" msgid "Tahitian" msgstr "Tahitian" msgctxt "foreign_lang" msgid "Urdu" msgstr "Urdu" msgctxt "foreign_lang" msgid "Indonesian" msgstr "Indonesian" msgctxt "foreign_lang" msgid "Estonian" msgstr "Estonian" msgctxt "foreign_lang" msgid "Bulgarian" msgstr "Bulgarian" msgctxt "foreign_lang" msgid "Arabic" msgstr "Arabic" msgctxt "foreign_lang" msgid "Danish" msgstr "Danish" msgctxt "foreign_lang" msgid "Portuguese" msgstr "Portuguese" msgctxt "foreign_lang" msgid "Greek" msgstr "Greek" msgctxt "foreign_lang" msgid "Finnish" msgstr "Finnish" msgctxt "foreign_lang" msgid "Ainu" msgstr "Ainu" msgctxt "foreign_lang" msgid "Algonquian" msgstr "Algonquian" msgctxt "foreign_lang" msgid "French" msgstr "French" # Settings msgid "Language" msgstr "" msgid "Display" msgstr "" msgid "General" msgstr "" msgid "Default search language" msgstr "" msgid "Page language" msgstr "" msgid "Always show english results" msgstr "" msgid "Show english results on top" msgstr "" msgid "Focus search bar on load" msgstr "" msgid "Select input on load" msgstr "" msgid "Results per page" 
msgstr "" msgid "Number..." msgstr "" msgid "Input has to be in range of 1 and 100!" msgstr "" msgid "max amount of names/words/sentences shown per page" msgstr "" msgid "Show Furigana" msgstr "" msgid "Show example sentences" msgstr "" msgid "Items per page" msgstr "" msgid "max amount of kanji shown per page" msgstr "" msgid "Use dark mode" msgstr "" msgid "Show kanji on load" msgstr "" msgid "Show kanji numbers" msgstr "" msgid "Default kanji animation speed" msgstr "" msgid "Enable Quick-Copy" msgstr "" msgid "Share usage statistics" msgstr "" msgid "STATISTICS_EXPLANATION" msgstr "To help improve Jotoba, we collect certain anonymous data (in accordance with the GDPR) by default - but if you wish, we will stop collecting anything from you." # Cookie text msgid "To use this feature you have to accept to the use of cookies." msgstr "" msgid "Your data will only be used for your personal website settings." msgstr "" # Prefix of cookie revoke text msgid "Click" msgstr "" # Suffix of Cookie revoke text msgid "to revoke your Cookies agreement" msgstr "" # Suffix of Cookie agree text msgid "to enable Cookies" msgstr "" # Footer msgid "Jotoba wouldn't be able to exist without the help of many open-source data sources" msgstr "" msgid "About Page" msgstr "" # Cookie Footer msgid "We use cookies to improve your experience and deliver personalized content." msgstr "" msgid "By using Jotoba you agree to our" msgstr "" # - End of the above sentence - msgid "." msgstr "" msgid "privacy policy" msgstr "" msgid "Only use necessary" msgstr "" msgid "Allow Cookies" msgstr "" ### About Page msgid "is a multilingual Japanese dictionary" msgstr "" msgid "It is easy to find translations for words or kanji, see example sentences and the way names can be written." 
msgstr "" msgid "Here are some examples on how to use this page" msgstr "" msgid "Quickly change the search type by pressing" msgstr "" msgid "You can specify your search by typing" msgstr "" msgid "You can find verbs that are conjugated" msgstr "" msgid "You can search multiple kanji at once" msgstr "" msgid "is open source" msgstr "" msgid "Check out our" msgstr "" msgid "Check out the" msgstr "" msgid "aswell if you are interested in upcoming features and what we are currently working on" msgstr "" msgid "for a list of all contributors in this project." msgstr "" msgctxt "index" msgid "or" msgstr "" msgctxt "index" msgid "Press" msgstr "" msgid "to instantly focus the search bar" msgstr "" ### Info / Help Page msgid "Shortcuts" msgstr "" msgid "To improve the quality of life on Jotoba, we offer some shortcuts to quickly navigate the page:" msgstr "" msgid "Everywhere" msgstr "" msgid "Quickly change between words | sentences | names | kanji tabs" msgstr "" msgid "Focus the search bar" msgstr "" msgid "Focussed search bar" msgstr "" msgid "Iterate suggestions up | down" msgstr "" msgid "Iterate suggestions down" msgstr "" msgid "[Words] search" msgstr "" msgid "Play the first possible audio" msgstr "" msgid "[Kanji] search" msgstr "" msgid "Show / Collapse compounds" msgstr "" msgid "To specify what kind of results your search should offer, you can use shortcuts." 
msgstr "" msgid "Hashtags should be written at end end of your input like this:" msgstr "" msgid "Available Hashtags for [Words] search" msgstr "" msgid "Search for nouns" msgstr "" msgid "Search for verbs" msgstr "" msgid "Search for transitive verbs" msgstr "" msgid "Search for intransitive verbs" msgstr "" msgid "Search for adverb" msgstr "" msgid "Search for auxilary verbs" msgstr "" msgid "Search for adjectives" msgstr "" msgid "Search for pronouns" msgstr "" msgid "Search for conjugations" msgstr "" msgid "Search for prefixes" msgstr "" msgid "Search for suffixes" msgstr "" msgid "Search for japanese particles" msgstr "" msgid "Lists iru/eru ending verbs which are conjugated as godan verbs" msgstr "" msgid "Search for sfx words [comic sounds]" msgstr "" msgid "Search for words used for counting" msgstr "" msgid "Search for expressions" msgstr "" msgid "Search for words used as interjections" msgstr "" msgid "Search for numeric words" msgstr "" msgid "Search for abbreviations" msgstr "" msgid "Search for words that don't fit in any category" msgstr "" msgid "Search for words included in the specific JLPT level" msgstr "" msgid "Search in the [words] category" msgstr "" msgid "Search in the [sentences] category" msgstr "" msgid "Search in the [name] category" msgstr "" msgid "Search in the [kanji] category" msgstr "" msgid "Available Hashtags for [Sentence] search" msgstr "" msgid "Search for sentences included in the specific JLPT level" msgstr "" msgid "Hide translations by default to translate them yourself and check if its correct" msgstr "" msgid "Available Hashtags for [Kanji] search" msgstr "" msgid "Search for kanji included in the specific Genki chapter" msgstr "" msgid "Radical search" msgstr "" msgid "The radical picker allows searching for radicals to make the process of picking radicals even faster. 
The supported inputs are as following:" msgstr "" msgid "Results in all radicals used to build given kanji characters" msgstr "" msgid "Searches in words for the given query and returns in result-matching radicals" msgstr "" msgid "Tries to find the given query in radicals names, otherwise does a word search and returns the result's kanji" msgstr "" ## Name search msgid "Full name" msgstr "" msgid "Sex" msgstr "" msgid "Name origin" msgstr "" ## Kanji results msgid "Part" msgid_plural "Parts" msgstr[0] "Part" msgstr[1] "Parts" # strokes suffix msgid "{} stroke" msgid_plural "{} strokes" msgstr[0] "{} stroke" msgstr[1] "{} strokes" msgid "Decomposition" msgstr "" msgid "Radical" msgstr "" msgid "Kun" msgstr "" msgid "On" msgstr "" msgid "On reading compounds" msgstr "" msgid "Kun reading compounds" msgstr "" msgid "JLPT level" msgstr "" msgid "of 2500 most used kanji in newspapers" msgstr "" msgid "Similar Kanji" msgstr "" msgid "Chinese reading" msgstr "" msgid "Korean reading" msgstr "" msgid "Vietnamese reading" msgstr "" msgid "Japanese names" msgstr "" ## Word results msgid "Words and kanji" msgstr "" msgid "{} could be an inflection of {}, with this form:" msgid_plural "{} could be an inflection of {}, with this forms:" msgstr[0] "{} could be an inflection of {}, with this form:" msgstr[1] "{} could be an inflection of {}, with this forms:" msgid "Temporarily switched language to {}" msgstr "" msgctxt "inflection" msgid "Negative" msgstr "" msgctxt "inflection" msgid "Polite" msgstr "" msgctxt "inflection" msgid "Present" msgstr "" msgctxt "inflection" msgid "Past" msgstr "" msgctxt "inflection" msgid "TeForm" msgstr "" msgctxt "inflection" msgid "Potential" msgstr "" msgctxt "inflection" msgid "Passive" msgstr "" msgctxt "inflection" msgid "Causative" msgstr "" msgctxt "inflection" msgid "PotentialOrPassive" msgstr "Potential or Passive" msgctxt "inflection" msgid "Imperative" msgstr "" msgctxt "inflection" msgid "Tai" msgstr "たい (Want to do something)" 
msgctxt "inflection" msgid "TeIru" msgstr "ている (Indicates an action that is ongoing)" msgctxt "inflection" msgid "TeAru" msgstr "てある (Indicates an action that has been done intentionally)" msgctxt "inflection" msgid "TeMiru" msgstr "てみる (Means to \"Try\" something)" msgctxt "inflection" msgid "Tara" msgstr "たら (States a condition)" msgid ", with this form:" msgid_plural ", with these forms:" msgstr[0] ", with this form:" msgstr[1] ", with these forms:" msgid "Taught in {} grade" msgstr "" msgid "Show Conjugations" msgstr "" msgid "Show collocation" msgid_plural "Show collocations" msgstr[0] "Show collocation" msgstr[1] "Show collocations" msgid "Collocations" msgstr "" msgid "Conjugations" msgstr "" msgid "Antonym of {}" msgstr "Antonym of {}" msgid "See also {}" msgstr "" msgid "Pitch accent" msgstr "" msgid "Other forms" msgstr "" msgid "Affirmative" msgstr "" msgid "Negative" msgstr "" msgid "Present" msgstr "" msgid "Present, polite" msgstr "" msgid "Past" msgstr "" msgid "Past, polite" msgstr "" msgid "Te-form" msgstr "" msgid "Potential" msgstr "" msgid "Passive" msgstr "" msgid "Causative" msgstr "" msgid "Causative Passive" msgstr "" msgid "Imperative" msgstr "" msgid "Play audio" msgstr "" msgid "common word" msgstr "" msgid "JLPT N{}" msgstr "" msgid "Download audio" msgstr "" msgid "Sentence search" msgstr "" msgid "Direct reference" msgstr "" # "no words found" msgid "words" msgstr "" # gairaigo msgid "From {}: {}" msgstr "" ## Sentence search msgid "hide" msgstr "" msgid "show" msgstr "" # "No sentences found" msgid "sentences" msgstr "" ## About page # Title 1 msgid "About" msgstr "" msgid "Jotoba is a multilingual Japanese dictionary. It is easy to find translations for words or kanji, see example sentences and the way names can be written." msgstr "" msgid "Jotoba is open source. Check out our" msgstr "" msgid "Github page" msgstr "" msgid "if you want to contribute or host Jotoba yourself." 
msgstr "" msgid "Trello Board" msgstr "" msgid "aswell if you are interested in upcoming features and what we are currently working on!" msgstr "" # Title 2 msgid "Data Sources and Inspiration" msgstr "" msgid "Of course this project wouldn't have been possible without the help of some great data sources." msgstr "" msgid "Many thanks to every one of them for providing such a variety of data people can use to learn the japanese language." msgstr "" # Source msgid "Jisho" msgstr "" msgid "Joto-kun" msgstr "" msgid "Joto-kun was created by a good friend of ours who is truly a wizard when it comes down to design!" msgstr "" msgid "Jisho, created by Kim Ahlström, Miwa Ahlström and Andrew Plummer is a pretty and powerful english-japanese dictionary." msgstr "" msgid "We took inspiration in their work and design to improve on their concept and offer it to a wider variety of people." msgstr "" # Source msgid "Words (except sound effects), Kanji and Names available on this site are publicly provided and maintained by" msgstr "" msgid "and available under the license" msgstr "" msgid "Additionally, the RADKFILE by Jin Breen is used to link Radicals to Kanji." msgstr "" # Source msgid "Audio Files" msgstr "" msgid "The audio files #1 were graciously made public by" msgstr "" msgid "WaniKani" msgstr "" msgid "and" msgstr "" msgid "Tofugo" msgstr "" msgid "and uploaded to Github under the CC-BY-4.0 licence." msgstr "" msgid "The audio files #2 are provided by the" msgstr "" msgid "Kanji alive project" msgstr "" msgid "and are also available under the CC-BY-4.0 license." msgstr "" msgid "Manga Sound Effects" msgstr "" msgid "The data about Sound Effects is graciously provided by Chris Kincaid and is used as additional data in the word search." msgstr "" # Source msgid "Sentences are provided by Tatoeba under the Creative Commons CC 1.0 and 2.0 licences. 
" msgstr "" # Source msgid "Kanji Animations" msgstr "" msgid "The raw data used for kanji animations is publicly provided by KanjiVG, a project by Ulrich Apel." msgstr "" msgid "The conversion into images and animated SVG is done by a ruby script which was made by" msgstr "" msgid "Kimtaro" msgstr "" msgid "and altered by" msgstr "" msgid "Yukáru" msgstr "" # Source msgid "JLPT Data" msgstr "" msgid "Data about JLPT proficiencies are by provided by Jonathan Waller." msgstr "" msgid "There is also some non-free data available on his website, so check it out if you are interested." msgstr "" # Source msgid "Word tokenization" msgstr "" msgid "Word tokenization is done using UniDic, by the UniDic Consortium and used for Japanese morphological analysis implementations." msgstr "" #Source msgid "Pitch accents" msgstr "" msgid "Data about Radicals used in specific Kanji are provided by Kanjium." msgstr "" msgid "On the project's Github Page you can find lots of data about Kanji." msgstr "" msgid "Pitch accent data has been extracted from UniDic." 
msgstr "" ## Jmdict # Dialect(s) msgid "{} dialect" msgstr "" # Information msgid "ateji" msgstr "" msgid "irregular kana" msgstr "" msgid "irregular kanji" msgstr "" msgid "irregular okurigana" msgstr "" msgid "outdated kana" msgstr "" msgid "outdated kanji" msgstr "" msgid "gikun" msgstr "" msgid "usually written in kana" msgstr "" msgid "rarely used kanji form" msgstr "" # Misc msgid "Abbreviation" msgstr "" msgid "Archaism" msgstr "" msgid "Character" msgstr "" msgid "Childrens language" msgstr "" msgid "Colloquialism" msgstr "" msgid "Company name" msgstr "" msgid "Creature" msgstr "" msgid "Dated term" msgstr "" msgid "Deity" msgstr "" msgid "Derogatory" msgstr "" msgid "Event" msgstr "" msgid "Document" msgstr "" msgid "Familiar language" msgstr "" msgid "Female term/language" msgstr "" msgid "Fiction" msgstr "" msgid "Given name" msgstr "" msgid "Group" msgstr "" msgid "Historical term" msgstr "" msgid "Honorific language" msgstr "" msgid "Humble language" msgstr "" msgid "Idiomatic expression" msgstr "" msgid "Jocular humorous term" msgstr "" msgid "Legend" msgstr "" msgid "Literary/formal term" msgstr "" msgid "Manga slang" msgstr "" msgid "Male term/language" msgstr "" msgid "Mythology" msgstr "" msgid "Internet slang" msgstr "" msgid "Object" msgstr "" msgid "Obsolete term" msgstr "" msgid "Obscure term" msgstr "" msgid "Onomatopoetic or mimetic word" msgstr "" msgid "Organization name" msgstr "" msgid "Other" msgstr "" msgid "Person name" msgstr "" msgid "Place name" msgstr "" msgid "Poetical term" msgstr "" msgid "Polite language" msgstr "" msgid "Product name" msgstr "" msgid "Proverb" msgstr "" msgid "Qutation" msgstr "" msgid "Rare" msgstr "" msgid "Religion" msgstr "" msgid "Sensitive" msgstr "" msgid "Service" msgstr "" msgid "Slang" msgstr "" msgid "Railway station" msgstr "" msgid "Family or surname" msgstr "" msgid "Usually written in kana" msgstr "" msgid "Unclassified name" msgstr "" msgid "Vulgar expression/word" msgstr "" msgid "Artwork" 
msgstr "" msgid "Rude/x-rated term" msgstr "" msgid "Yojijukugo" msgstr "" # Fields msgid "{} term" msgstr "" msgid "Agriculture" msgstr "" msgid "Anatomy" msgstr "" msgid "Archeology" msgstr "" msgid "Architecture" msgstr "" msgid "Art aesthetics" msgstr "" msgid "Astronomy" msgstr "" msgid "Audio/visual" msgstr "" msgid "Aviation" msgstr "" msgid "Baseball" msgstr "" msgid "Biochemistry" msgstr "" msgid "Biology" msgstr "" msgid "Botany" msgstr "" msgid "Buddhism" msgstr "" msgid "Business" msgstr "" msgid "Chemistry" msgstr "" msgid "Christianity" msgstr "" msgid "Computing" msgstr "" msgid "Clothing" msgstr "" msgid "Crystallography" msgstr "" msgid "Ecology" msgstr "" msgid "Economics" msgstr "" msgid "Electricity" msgstr "" msgid "Electronics" msgstr "" msgid "Embryology" msgstr "" msgid "Engineering" msgstr "" msgid "Entomology" msgstr "" msgid "Finance" msgstr "" msgid "Fishing" msgstr "" msgid "FoodCooking" msgstr "" msgid "Gardening" msgstr "" msgid "Genetics" msgstr "" msgid "Geography" msgstr "" msgid "Geology" msgstr "" msgid "Geometry" msgstr "" msgid "Go (game)" msgstr "" msgid "Golf" msgstr "" msgid "Grammar" msgstr "" msgid "Greek mythology" msgstr "" msgid "Hanafuda" msgstr "" msgid "Horseracing" msgstr "" msgid "Law" msgstr "" msgid "Linguistics" msgstr "" msgid "Logic" msgstr "" msgid "Martial arts" msgstr "" msgid "Mahjong" msgstr "" msgid "Mathematics" msgstr "" msgid "MechanicalEngineering" msgstr "" msgid "Medicine" msgstr "" msgid "Climate/weather" msgstr "" msgid "Military" msgstr "" msgid "Music" msgstr "" msgid "Ornithology" msgstr "" msgid "Paleontology" msgstr "" msgid "Pathology" msgstr "" msgid "Pharmacy" msgstr "" msgid "Philosophy" msgstr "" msgid "Photography" msgstr "" msgid "Physics" msgstr "" msgid "Physiology" msgstr "" msgid "Printing" msgstr "" msgid "Psychology" msgstr "" msgid "Psychiatry" msgstr "" msgid "Railway" msgstr "" msgid "Shinto" msgstr "" msgid "Shogi" msgstr "" msgid "Sports" msgstr "" msgid "Statistics" msgstr 
"" msgid "Sumo" msgstr "" msgid "Telecommunications" msgstr "" msgid "Trademark" msgstr "" msgid "Videogame" msgstr "" msgid "Zoology" msgstr "" # Part of speech msgid "Godan verb" msgstr "" msgid "Irregular verb with {} ending" msgstr "" msgid "SoundFx" msgstr "" msgid "Expression" msgstr "" msgid "Counter" msgstr "" msgid "Suffix" msgstr "" msgid "Prefix" msgstr "" msgid "Particle" msgstr "" msgid "Interjection" msgstr "" msgid "Symbol" msgstr "" msgid "Pronoun" msgstr "" msgid "Auxilary" msgstr "" msgid "Numeric" msgstr "" msgid "Adverb-To" msgstr "" msgid "Adverb" msgstr "" msgid "Adjective" msgstr "" msgid "Auxilary adjective" msgstr "" msgid "Auxilary Verb" msgstr "" msgid "Verb" msgstr "" msgid "Conjugation" msgstr "" msgid "Unclassified" msgstr "" msgid "Noun or verb describing a noun" msgstr "" msgid "I adjective" msgstr "" msgid "I adjective (conjugated like いい)" msgstr "" msgid "Ku adjective" msgstr "" msgid "Na adjective" msgstr "" msgid "Formal form of na adjective" msgstr "" msgid "No adjective" msgstr "" msgid "Pre noun adjective" msgstr "" msgid "Shiku adjective" msgstr "" msgid "Taru adjective" msgstr "" msgid "Noun" msgstr "" msgid "Noun adverbial" msgstr "" msgid "Prefix (noun)" msgstr "" msgid "Suffix (noun)" msgstr "" msgid "Temporal noun" msgstr "" msgid "Unspecified verb" msgstr "" msgid "Intransitive verb" msgstr "" msgid "Transitive verb" msgstr "" msgid "Ichidan verb" msgstr "" msgid "Ichidan zuru verb" msgstr "" msgid "Ichidan kureru verb" msgstr "" msgid "Kuru verb" msgstr "" msgid "Noun taking suru" msgstr "" msgid "Suru verb" msgstr "" msgid "Suru special" msgstr "" msgid "Pre-noun" msgstr "" # this thingy -> " " msgid "Space" msgstr "" ## Name search msgctxt "name_type" msgid "Company" msgstr "" msgctxt "name_type" msgid "Female" msgstr "" msgctxt "name_type" msgid "Male" msgstr "" msgctxt "name_type" msgid "Organization" msgstr "" msgctxt "name_type" msgid "Persons name" msgstr "" msgctxt "name_type" msgid "Place" msgstr "" msgctxt 
"name_type" msgid "Product" msgstr "" msgctxt "name_type" msgid "(Railway)Station" msgstr "" msgctxt "name_type" msgid "Surname" msgstr "" msgctxt "name_type" msgid "Unknown" msgstr "" msgctxt "name_type" msgid "Art work" msgstr "" msgctxt "name_type" msgid "Character" msgstr "" msgctxt "name_type" msgid "Deity" msgstr "" msgctxt "name_type" msgid "Document" msgstr "" msgctxt "name_type" msgid "Event" msgstr "" msgctxt "name_type" msgid "Fiction" msgstr "" msgctxt "name_type" msgid "Group" msgstr "" msgctxt "name_type" msgid "Legend" msgstr "" msgctxt "name_type" msgid "Mythology" msgstr "" msgctxt "name_type" msgid "Object" msgstr "Object" msgctxt "name_type" msgid "Other" msgstr "" msgctxt "name_type" msgid "Religion" msgstr "" msgctxt "name_type" msgid "Service" msgstr "" # "No names found" msgid "names" msgstr "" ## Search help msgid "Search Help" msgstr "" msgid "No {} found" msgstr "" msgid "Your default search language might not fit your input" msgstr "" msgid "Check your search for typos" msgstr "" msgid "Use more generic search terms" msgstr "" msgid "Try finding your search in a different category using" msgstr "" msgid "Your search request might not be included in our database yet" msgstr "" msgid "If you think your search should be contained in our database, submit an issue on" msgstr "" # Also check our Trello board since we might be working on it msgid "Also check our" msgstr "" msgid "since we might be working on it!" 
msgstr "" # Paginator msgid "First" msgstr "" msgid "Last" msgstr "" ================================================ FILE: locales/hu.po ================================================ # SINGULAR # msgctxt "" # msgid "" # msgstr "" # PLURAL # msgctxt "" # msgid "" # msgid_plural "" # msgstr[0] "" # OPTIONAL: msgstr[1] "" msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "PO-Revision-Date: 2021-11-29 21:47+0100\n" "Last-Translator: \n" "Language-Team: English\n" "Language: en\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" msgid "Jotoba" msgstr "" #### Base template msgid "Search..." msgstr "Keresés..." msgid "Search" msgstr "Keresés" msgid "Settings" msgstr "Beállítások" msgid "Radicals" msgstr "Gyökök" msgid "Voice" msgstr "Hang" # Rad Help Msg msgid "This tool allows you to find Kanji by their core components (Radicals)" msgstr "Ezzel az eszközzel rákereshetsz Kanjikra az alkotórészeik alapján (Gyökök)" msgid "You can select Radicals below and add found Kanji to the search bar" msgstr "Alul kiválaszthatod a gyököket és a megtalált kanjit hozzáadhatod a keresősávhoz" msgid "Enter" msgstr "" msgid "to start searching" msgstr "a keresés elkezdéséhez" # Rad Btns msgid "Reset" msgstr "Visszaállítás" msgid "reset" msgstr "visszaállítás" msgid "apply" msgstr "beállítás" msgid "create" msgstr "létrehozás" msgid "here" msgstr "itt" msgid "Accept" msgstr "Elfogadás" msgid "Decline" msgstr "Visszautasítás" # Search type / Dropdown msgid "Words" msgstr "Szavak" msgid "Kanji" msgstr "Kanjik" msgid "Sentences" msgstr "Mondatok" msgid "Names" msgstr "Nevek" # Speech overlay msgid "Current language" msgstr "Kiválasztott nyelv" msgid "Currently listening" msgstr "Éppen figyel" msgid "No" msgstr "Nem" msgid "To change your language, select one of the following" msgstr "A nyelv megváltoztatásához válaszd ki az egyiket az alábbiak közül" # Image search overlay msgid 
"Enter a URL or upload your image directly and Jotoba will try to search for japanese words contained in the picture." msgstr "Írj be egy webcímet vagy tölts fel egy képet közvetlenül és a Jotoba megpróbál rákeresni a szavakra a képen" msgid "Enter Image URL..." msgstr "Add meg a kép címét..." # Rad Picker overlay msgid "Select Radicals" msgstr "Gyökök Kiválasztása" msgid "Select Kanji" msgstr "Kanji Kiválasztása" msgid "Search Radicals..." msgstr "Gyökök Keresése..." # Notification overlay msgid "Notifications" msgstr "Értesítések" msgid "No new notifications" msgstr "Nincs új értesítés" msgid "Show all" msgstr "Mind mutatása" msgid "Close" msgstr "Bezárás" # Languages msgid "English" msgstr "Angol" msgid "German" msgstr "Német" msgid "Russian" msgstr "Orosz" msgid "Spanish" msgstr "Spanyol" msgid "Swedish" msgstr "Svéd" msgid "French" msgstr "Francia" msgid "Dutch" msgstr "Holland" msgid "Hungarian" msgstr "Magyar" msgid "Slovenian" msgstr "Szlovén" msgid "Japanese" msgstr "Japán" msgctxt "foreign_lang" msgid "German" msgstr "Német" msgctxt "foreign_lang" msgid "English" msgstr "Angol" msgctxt "foreign_lang" msgid "Georgian" msgstr "Grúz" msgctxt "foreign_lang" msgid "Chinese" msgstr "Kínai" msgctxt "foreign_lang" msgid "Manchu" msgstr "Mandzsu" msgctxt "foreign_lang" msgid "Kurdish" msgstr "Kurd" msgctxt "foreign_lang" msgid "ChinookJargon" msgstr "Csinuk" msgctxt "foreign_lang" msgid "Italian" msgstr "Olasz" msgctxt "foreign_lang" msgid "Malayalam" msgstr "Malajálam" msgctxt "foreign_lang" msgid "Tibetian" msgstr "Tibeti" msgctxt "foreign_lang" msgid "Mongolian" msgstr "Mongol" msgctxt "foreign_lang" msgid "Romanian" msgstr "Román" msgctxt "foreign_lang" msgid "Bantu" msgstr "Bantu" msgctxt "foreign_lang" msgid "Norwegian" msgstr "Norvég" msgctxt "foreign_lang" msgid "Icelandic" msgstr "Izlandi" msgctxt "foreign_lang" msgid "Breton" msgstr "Breton" msgctxt "foreign_lang" msgid "Maori" msgstr "Maori" msgctxt "foreign_lang" msgid "Latin" msgstr "Latin" msgctxt
"foreign_lang" msgid "Amharic" msgstr "Amhara" msgctxt "foreign_lang" msgid "Khmer" msgstr "Khmer" msgctxt "foreign_lang" msgid "Swahili" msgstr "Szuahéli" msgctxt "foreign_lang" msgid "Hebrew" msgstr "Héber" msgctxt "foreign_lang" msgid "Galician" msgstr "Galiciai" msgctxt "foreign_lang" msgid "Korean" msgstr "Koreai" msgctxt "foreign_lang" msgid "Tamil" msgstr "Tamil" msgctxt "foreign_lang" msgid "Viatnamese" msgstr "Vietnám" msgctxt "foreign_lang" msgid "Polish" msgstr "Lengyel" msgctxt "foreign_lang" msgid "Sanskrit" msgstr "Szanszkrit" msgctxt "foreign_lang" msgid "Persian" msgstr "Perzsa" msgctxt "foreign_lang" msgid "Filipino" msgstr "Filippínó" msgctxt "foreign_lang" msgid "Moldavian" msgstr "Moldáv" msgctxt "foreign_lang" msgid "Croatian" msgstr "Horvát" msgctxt "foreign_lang" msgid "Thai" msgstr "Thai" msgctxt "foreign_lang" msgid "Burmese" msgstr "Burmai" msgctxt "foreign_lang" msgid "Slovak" msgstr "Szlovák" msgctxt "foreign_lang" msgid "Czech" msgstr "Cseh" msgctxt "foreign_lang" msgid "Hindi" msgstr "Hindi" msgctxt "foreign_lang" msgid "Mapudungun" msgstr "Mapudungun" msgctxt "foreign_lang" msgid "Turkish" msgstr "Török" msgctxt "foreign_lang" msgid "Hawaiian" msgstr "Hawaii" msgctxt "foreign_lang" msgid "Afrikaans" msgstr "Afrikaans" msgctxt "foreign_lang" msgid "Esperanto" msgstr "Eszperantó" msgctxt "foreign_lang" msgid "Yiddish" msgstr "Jiddis" msgctxt "foreign_lang" msgid "Somali" msgstr "Szomáli" msgctxt "foreign_lang" msgid "Tahitian" msgstr "Tahiti" msgctxt "foreign_lang" msgid "Urdu" msgstr "Urdu" msgctxt "foreign_lang" msgid "Indonesian" msgstr "Indonéz" msgctxt "foreign_lang" msgid "Estonian" msgstr "Észt" msgctxt "foreign_lang" msgid "Bulgarian" msgstr "Bolgár" msgctxt "foreign_lang" msgid "Arabic" msgstr "Arab" msgctxt "foreign_lang" msgid "Danish" msgstr "Dán" msgctxt "foreign_lang" msgid "Portuguese" msgstr "Portugál" msgctxt "foreign_lang" msgid "Greek" msgstr "Görög" msgctxt "foreign_lang" msgid "Finnish" msgstr "Finn" msgctxt 
"foreign_lang" msgid "Ainu" msgstr "Ainu" msgctxt "foreign_lang" msgid "Algonquian" msgstr "Algonking" msgctxt "foreign_lang" msgid "French" msgstr "Francia" # Settings msgid "Language" msgstr "Nyelv" msgid "Display" msgstr "Megjelenítés" msgid "General" msgstr "Általános" msgid "Default search language" msgstr "Alapértelmezett keresési nyelv" msgid "Page language" msgstr "Weboldal nyelve" msgid "Always show english results" msgstr "Mindig angol eredmények mutatása" msgid "Show english results on top" msgstr "Angol eredmények felül" msgid "Focus search bar on load" msgstr "Keresősáv fókuszálása betöltéskor" msgid "Select input on load" msgstr "Bemenet kiválasztása betöltéskor" msgid "Results per page" msgstr "Eredmények száma egy oldalon" msgid "Number..." msgstr "Szám..." msgid "Input has to be in range of 1 and 100!" msgstr "A bemenetnek 1 és 100 között kell lennie!" msgid "max amount of names/words/sentences shown per page" msgstr "nevek/szavak/mondatok maximális száma egy oldalon" msgid "Show Furigana" msgstr "Furigana Mutatása" msgid "Show example sentences" msgstr "Példamondatok mutatása" msgid "Items per page" msgstr "Eredmények egy oldalon" msgid "max amount of kanji shown per page" msgstr "kanjik maximális száma egy oldalon" msgid "Use dark mode" msgstr "Sötét téma használata" msgid "Default kanji animation speed" msgstr "Alapértelmezett kanji animáció sebessége" msgid "Enable Quick-Copy" msgstr "" msgid "Share usage statistics" msgstr "Használati statisztikák megosztása" # Cookie text msgid "To use this feature you have to accept to the use of cookies." msgstr "Ennek a funkciónak a használatához el kell fogadnod a sütiket." msgid "Your data will only be used for your personal website settings." msgstr "Az adataid csak a személyes beállításaidhoz lesznek felhasználva."
# Prefix of cookie revoke text msgid "Click" msgstr "Kattints" # Suffix of Cookie revoke text msgid "to revoke your Cookies agreement" msgstr "a sütik visszavonásához" # Suffix of Cookie agree text msgid "to enable Cookies" msgstr "a sütik engedélyezéséhez" # Footer msgid "Jotoba wouldn't be able to exist without the help of many open-source data sources" msgstr "A Jotoba nem jöhetett volna létre rengeteg nyílt-forráskódú adatforrás nélkül" msgid "About Page" msgstr "Rólunk Oldalunkat" # Cookie Footer msgid "We use cookies to improve your experience and deliver personalized content." msgstr "Sütiket használunk az élményed javítására és a személyre szabott tartalmakhoz" msgid "By using Jotoba you agree to our" msgstr "A Jotoba használatával elfogadod a" # - End of the above sentence - msgid "." msgstr "-ünket." msgid "privacy policy" msgstr "adatvédelmi házirend" msgid "Only use necessary" msgstr "Csak a szükségeseket" msgid "Allow Cookies" msgstr "Sütik Engedélyezése" ### About Page msgid "is a multilingual Japanese dictionary" msgstr "egy többnyelvű japán szótár" msgid "It is easy to find translations for words or kanji, see example sentences and the way names can be written." msgstr "Könnyen megtalálhatod szavak és kanjik fordítását, láthatsz példamondatokat és azt, hogy hogyan kell egyes neveket írni." 
msgid "Here are some examples on how to use this page" msgstr "Itt van pár példa az oldal használatához" msgid "Quickly change the search type by pressing" msgstr "Gyorsan megváltoztathatod a keresési típust" msgid "You can specify your search by typing" msgstr "Egyszerűen megadhatod a keresésedet" msgid "You can find verbs that are conjugated" msgstr "Ragozott igékre is kereshetsz" msgid "You can search multiple kanji at once" msgstr "Egyszerre több kanjira is kereshetsz" msgid "is open source" msgstr "nyílt forráskódú" msgid "Check out our" msgstr "Tekintsd meg a" msgid "Check out the" msgstr "Tekintsd meg a" msgid "aswell if you are interested in upcoming features and what we are currently working on" msgstr "-nkat is, ha érdekelnek a közelgő funkciók és szeretnéd látni, hogy éppen min dolgozunk" msgid "for a list of all contributors in this project." msgstr "a projekt közreműködői listájának a megtekintéséhez." msgctxt "index" msgid "or" msgstr "vagy" msgctxt "index" msgid "Press" msgstr "Nyomd meg a" msgid "to instantly focus the search bar" msgstr "gombot a keresősáv fókuszálásához" ### Info / Help Page msgid "Shortcuts" msgstr "Gyorsgombok" msgid "To improve the quality of life on Jotoba, we offer some shortcuts to quickly navigate the page:" msgstr "A Jotoba használatának megkönnyítésére létrehoztunk néhány gyorsgombot az oldal navigálásához:" msgid "Everywhere" msgstr "Mindenhol" msgid "Quickly change between words | sentences | names | kanji tabs" msgstr "Válts a szavak | mondatok | nevek | kanji lapok között" msgid "Focus the search bar" msgstr "Keresősáv fókuszálása" msgid "Focussed search bar" msgstr "Fókuszált keresősáv" msgid "Iterate suggestions up | down" msgstr "Felajánlások közötti lépés fel | le" msgid "Iterate suggestions down" msgstr "Felajánlások közötti lépés le" msgid "[Words] search" msgstr "[Szavak] keresés" msgid "Play the first possible audio" msgstr "Az első létező hanganyag lejátszása" msgid "[Kanji] search" msgstr "[Kanji] keresés"
msgid "Show / Collapse compounds" msgstr "Összetételek mutatása / elrejtése" msgid "To specify what kind of results your search should offer, you can use shortcuts." msgstr "A keresés eredményének típusának beállításához használhatsz gyorsgombokat." msgid "Hashtags should be written at end end of your input like this:" msgstr "A hashtag-ek a keresés végére kerüljenek a következőképpen:" msgid "Available Hashtags for [Words] search" msgstr "Hashtag-ek [Szavak] kereséséhez" msgid "Search for nouns" msgstr "Főnevek keresése" msgid "Search for verbs" msgstr "Igék keresése" msgid "Search for transitive verbs" msgstr "Tranzitív igék keresése" msgid "Search for intransitive verbs" msgstr "Intranzitív igék keresése" msgid "Search for adverb" msgstr "Határozószavak keresése" msgid "Search for auxilary verbs" msgstr "Segédigék keresése" msgid "Search for adjectives" msgstr "Melléknevek keresése" msgid "Search for pronouns" msgstr "Névmások keresése" msgid "Search for conjugations" msgstr "Ragozások keresése" msgid "Search for prefixes" msgstr "Előtagok keresése" msgid "Search for suffixes" msgstr "Utótagok keresése" msgid "Search for japanese particles" msgstr "Japán partikulák keresése" msgid "Lists iru/eru ending verbs which are conjugated as godan verbs" msgstr "Godan igékhez hasonlóan ragozott iru/eru igék listázása" msgid "Search for sfx words [comic sounds]" msgstr "Hangutánzó szavak keresése" msgid "Search for words used for counting" msgstr "Számlálószavak keresése" msgid "Search for expressions" msgstr "Kifejezések keresése" msgid "Search for words used as interjections" msgstr "Indulatszavakként használt szavak keresése" msgid "Search for numeric words" msgstr "Numerikus szavak keresése" msgid "Search for abbreviations" msgstr "Rövidítések keresése" msgid "Search for words that don't fit in any category" msgstr "Más kategóriákba nem illő szavak keresése" msgid "Search for words included in the specific JLPT level" msgstr "JLPT szintekhez tartozó szavak keresése" 
msgid "Search in the [words] category" msgstr "Keresés a [szavak] kategóriában" msgid "Search in the [sentences] category" msgstr "Keresés a [mondatok] kategóriában" msgid "Search in the [name] category" msgstr "Keresés a [nevek] kategóriában" msgid "Search in the [kanji] category" msgstr "Keresés a [kanjik] kategóriában" msgid "Available Hashtags for [Sentence] search" msgstr "Hashtag-ek [Mondatok] kereséséhez" msgid "Search for sentences included in the specific JLPT level" msgstr "JLPT szintekhez tartotó mondatok keresése" msgid "Hide translations by default to translate them yourself and check if its correct" msgstr "Fordítások elrejtése, hogy lefordíthassad és ellenőrizhesd, hogy helyes-e" msgid "Available Hashtags for [Kanji] search" msgstr "Hashtag-ek [Kanjik] kereséséhez" msgid "Search for kanji included in the specific Genki chapter" msgstr "Kanjik keresése a megadott Genki fejezetben" msgid "Radical search" msgstr "Gyök keresés" msgid "The radical picker allows searching for radicals to make the process of picking radicals even faster. The supported inputs are as following:" msgstr "A gyökválasztóban lehet gyökökre keresni, így felgyorsítva a folyamatot. 
A megengedett bemenetek a következők:" msgid "Results in all radicals used to build given kanji characters" msgstr "Visszaadja a megadott kanjit felépítő gyököket" msgid "Searches in words for the given query and returns in result-matching radicals" msgstr "Rákeres a kifejezésre és visszatér az abban használt gyökökkel" msgid "Tries to find the given query in radicals names, otherwise does a word search and returns the result's kanji" msgstr "Gyököket keres a neveik alapján, vagy, ha ez sikertelen, kanjira keres" ## Name search msgid "Full name" msgstr "Teljes név" msgid "Sex" msgstr "Nem" msgid "Name origin" msgstr "Név eredete" ## Kanji results msgid "Part" msgid_plural "Parts" msgstr[0] "Rész" msgstr[1] "Részek" # strokes suffix msgid "{} stroke" msgid_plural "{} strokes" msgstr[0] "{} vonás" msgstr[1] "{} vonás" msgid "Decomposition" msgstr "Felbontás" msgid "Radical" msgstr "Gyök" msgid "Kun" msgstr "Kun" msgid "On" msgstr "On" msgid "On reading compounds" msgstr "On olvasat összetétel" msgid "Kun reading compounds" msgstr "Kun olvasat összetétel" msgid "JLPT level" msgstr "JLPT szint" msgid "of 2500 most used kanji in newspapers" msgstr " a 2500 újságokban leggyakrabban használt kanji közül" msgid "Similar Kanji" msgstr "Hasonló kanjik" msgid "Chinese reading" msgstr "Kínai olvasat" msgid "Korean reading" msgstr "Koreai olvasat" msgid "Vietnamese reading" msgstr "" msgid "Japanese names" msgstr "Japán nevek" ## Word results msgid "Words and kanji" msgstr "Szavak és kanjik" msgid "{} could be an inflection of {}, with this form:" msgid_plural "{} could be an inflection of {}, with this forms:" msgstr[0] "{} could be an inflection of {}, with this form:" msgstr[1] "{} could be an inflection of {}, with this forms:" msgid "Temporarily switched language to {}" msgstr "Átmenetileg a nyelv megváltozott {}-ra/re" msgctxt "inflection" msgid "Negative" msgstr "Negatív" msgctxt "inflection" msgid "Polite" msgstr "Udvarias" msgctxt "inflection" msgid "Present"
msgstr "Jelen" msgctxt "inflection" msgid "Past" msgstr "Múlt" msgctxt "inflection" msgid "TeForm" msgstr "Te alak" msgctxt "inflection" msgid "Potential" msgstr "Potenciális" msgctxt "inflection" msgid "Passive" msgstr "Szenvedő" msgctxt "inflection" msgid "Causative" msgstr "Műveltető" msgctxt "inflection" msgid "PotentialOrPassive" msgstr "Potenciális vagy szenvedő" msgctxt "inflection" msgid "Imperative" msgstr "Felszólító" msgctxt "inflection" msgid "Tai" msgstr "たい (Vágyat, kívánságot fejez ki)" msgctxt "inflection" msgid "TeIru" msgstr "ている (Egy jelenleg folyamatban lévő cselekvést fejez ki)" msgctxt "inflection" msgid "TeAru" msgstr "てある (Egy befejezett cselekvést fejez ki)" msgctxt "inflection" msgid "TeMiru" msgstr "てみる (Egy cselekvés megpróbálását fejezi ki)" msgctxt "inflection" msgid "Tara" msgstr "たら (Egy feltételt fejez ki)" msgid ", with this form:" msgid_plural ", with these forms:" msgstr[0] ", ebben a formában:" msgstr[1] ", ezekben a formákban:" msgid "Taught in {} grade" msgstr "{}. 
évben oktatott" msgid "Show Conjugations" msgstr "Ragozás Mutatása" msgid "Show collocation" msgid_plural "Show collocations" msgstr[0] "Besorolás mutatása" msgstr[1] "Besorolások mutatása" msgid "Collocations" msgstr "Besorolások" msgid "Conjugations" msgstr "Ragozások" msgid "Antonym of {}" msgstr "Ellentéte: {}" msgid "See also {}" msgstr "Lásd még {}" msgid "Pitch accent" msgstr "Zenei hangsúly" msgid "Other forms" msgstr "Más formák" msgid "Affirmative" msgstr "Igenlő" msgid "Negative" msgstr "Negatív" msgid "Present" msgstr "Jelen" msgid "Present, polite" msgstr "Jelen, udvarias" msgid "Past" msgstr "Múlt" msgid "Past, polite" msgstr "Múlt, udvarias" msgid "Te-form" msgstr "Te alak" msgid "Potential" msgstr "Potenciális" msgid "Passive" msgstr "Szenvedő" msgid "Causative" msgstr "Műveltető" msgid "Causative Passive" msgstr "Műveltető szenvedő" msgid "Imperative" msgstr "Felszólító" msgid "Play audio" msgstr "Hanganyag lejátszása" msgid "common word" msgstr "gyakori szó" msgid "JLPT N{}" msgstr "" msgid "Download audio" msgstr "Hanganyag letöltése" msgid "Sentence search" msgstr "Mondatok keresése" msgid "Direct reference" msgstr "Közvetlen hivatkozás" # "no words found" msgid "words" msgstr "szó" # gairaigo msgid "From {}: {}" msgstr "{}-ból/ből: {}" ## Sentence search msgid "hide" msgstr "elrejtés" msgid "show" msgstr "mutatás" # "No sentences found" msgid "sentences" msgstr "mondat" ## About page # Title 1 msgid "About" msgstr "Rólunk" msgid "Jotoba is a multilingual Japanese dictionary. It is easy to find translations for words or kanji, see example sentences and the way names can be written." msgstr "A Jotoba egy többnyelvű japán szótár. Könnyen megtalálhatod szavak és kanjik fordítását, láthatsz példamondatokat és azt, hogy hogyan kell egyes neveket írni." msgid "Jotoba is open source. Check out our" msgstr "A Jotoba nyílt forráskódú. 
Tekintsd meg a" msgid "Github page" msgstr "Github oldalunkat" msgid "if you want to contribute or host Jotoba yourself." msgstr "ha szeretnél közreműködni, vagy magad futtatni a Jotoba-t." msgid "Trello Board" msgstr "Trello Táblánk" msgid "aswell if you are interested in upcoming features and what we are currently working on!" msgstr "is, ha érdekelnek a közelgő funkciók és, hogy min dolgozunk éppen!" # Title 2 msgid "Data Sources and Inspiration" msgstr "Adatforrások és Inspirációk" msgid "Of course this project wouldn't have been possible without the help of some great data sources." msgstr "Természetesen ez a projekt nem lett volna lehetséges néhány remek adatforrás nélkül." msgid "Many thanks to every one of them for providing such a variety of data people can use to learn the japanese language." msgstr "Ezer köszönet mindegyikőjüknek az általuk biztosított adatokért, hogy mindenki használhassa japánul tanuláshoz." # Source msgid "Jisho" msgstr "Jisho" msgid "Joto-kun" msgstr "Joto-kun" msgid "Joto-kun was created by a good friend of ours who is truly a wizard when it comes down to design!" msgstr "Joto-kunt egy jóbarátunk készítette, aki varázsolni tud, ha dizájnról van szó!" msgid "Jisho, created by Kim Ahlström, Miwa Ahlström and Andrew Plummer is a pretty and powerful english-japanese dictionary." msgstr "Jisho, Kim Ahlström, Miwa Ahlström és Andrew Plummer által egy gyönyörű és erőteljes angol-japán szótár." msgid "We took inspiration in their work and design to improve on their concept and offer it to a wider variety of people." msgstr "Ihletet merítettünk a munkájukból és dizájnukból, hogy javíthassunk az ötletükön és egy szélesebb körben felajánlhassuk." 
# Source msgid "Words (except sound effects), Kanji and Names available on this site are publicly provided and maintained by" msgstr "Az ezen az oldalon található szavakat (a hangutázószavak kivételével), kanjikat és neveket publikusan elérhetővé tette és karbantartja az" msgid "and available under the license" msgstr "és az alábbi licensz alatt publikálta:" msgid "Additionally, the RADKFILE by Jin Breen is used to link Radicals to Kanji." msgstr "Ezenkívül a Jin Breen által készített RADKFILE segítségével társítunk gyököket a kanjikhoz." # Source msgid "Audio Files" msgstr "Hanganyagok" msgid "The audio files #1 were graciously made public by" msgstr "Az #1-es hanganyagokat kegyesen publikusan elérhetővé tette a" msgid "WaniKani" msgstr "WaniKani" msgid "and" msgstr "és" msgid "Tofugo" msgstr "Tofugu" msgid "and uploaded to Github under the CC-BY-4.0 licence." msgstr "és feltöltötték Githubra CC-BY-4.0 licensz alatt." msgid "The audio files #2 are provided by the" msgstr "A #2-es hanganyagokat a" msgid "Kanji alive project" msgstr "Kanji alive projekt" msgid "and are also available under the CC-BY-4.0 license." msgstr "biztosította és tette elérhetővé CC-BY-4.0 licensz alatt." msgid "Manga Sound Effects" msgstr "Manga Hang Effektek" msgid "The data about Sound Effects is graciously provided by Chris Kincaid and is used as additional data in the word search." msgstr "A hangeffektekről szóló adatokat Chris Kincaid biztosította, hogy megjeleníthessük a szókeresés közben." # Source msgid "Sentences are provided by Tatoeba under the Creative Commons CC 1.0 and 2.0 licences. " msgstr "A mondatokat a Tatoeba biztosította Creative Commons CC 1.0 és 2.0 licensz alatt. " # Source msgid "Kanji Animations" msgstr "Kanji Animációk" msgid "The raw data used for kanji animations is publicly provided by KanjiVG, a project by Ulrich Apel." msgstr "A nyers adatokat a kanji animációkhoz az Ulrich Apel által létrehozott KanjiVG biztosította." 
msgid "The conversion into images and animated SVG is done by a ruby script which was made by" msgstr "A képekké és SVG-ké alakítást végző ruby szkript készítette" msgid "Kimtaro" msgstr "Kimtaro" msgid "and altered by" msgstr "és módosította" msgid "Yukáru" msgstr "Yukáru" # Source msgid "JLPT Data" msgstr "JLPT Adatok" msgid "Data about JLPT proficiencies are by provided by Jonathan Waller." msgstr "A JLPT szintek adatait Jonathan Waller biztosította." msgid "There is also some non-free data available on his website, so check it out if you are interested." msgstr "A weboldalán elérhető néhány nem ingyenes adatforrás is, úgyhogy nézz be, ha érdekel." # Source msgid "Word tokenization" msgstr "Szó elemekre bontésa" msgid "Word tokenization is done using UniDic, by the UniDic Consortium and used for Japanese morphological analysis implementations." msgstr "Word tokenization is done using UniDic, by the UniDic Consortium and used for Japanese morphological analysis implementations." #Source msgid "Pitch accents" msgstr "" msgid "Data about Radicals used in specific Kanji are provided by Kanjium." msgstr "" msgid "On the project's Github Page you can find lots of data about Kanji." msgstr "A projekt weboldalán rengeteg, kanjikról szóló adatot találhatsz." msgid "Pitch accent data has been extracted from UniDic." 
msgstr "" ## Jmdict # Dialect(s) msgid "{} dialect" msgstr "{} dialektus" # Information msgid "ateji" msgstr "ateji" msgid "irregular kana" msgstr "szabálytalan kana" msgid "irregular kanji" msgstr "szabálytalan kanji" msgid "irregular okurigana" msgstr "szabálytalan okurigana" msgid "outdated kana" msgstr "elavult kana" msgid "outdated kanji" msgstr "elavult kanji" msgid "gikun" msgstr "gikun" msgid "usually written in kana" msgstr "általában kanával írják" msgid "rarely used kanji form" msgstr "ritkán használt kanji forma" # Misc msgid "Abbreviation" msgstr "Rövidítés" msgid "Archaism" msgstr "Régies kifejezés" msgid "Character" msgstr "Karakter" msgid "Childrens language" msgstr "Gyereknyelv" msgid "Colloquialism" msgstr "Köznyelvi kifejezés" msgid "Company name" msgstr "Cégnév" msgid "Creature" msgstr "Élőlény" msgid "Dated term" msgstr "Régimódi kifejezés" msgid "Deity" msgstr "Istenség" msgid "Derogatory" msgstr "Lekicsinylő" msgid "Event" msgstr "Esemény" msgid "Document" msgstr "Dokumentum" msgid "Familiar language" msgstr "Intim nyelv" msgid "Female term/language" msgstr "Női kifejezés/nyelv" msgid "Fiction" msgstr "Fikció" msgid "Given name" msgstr "Keresztnév" msgid "Group" msgstr "Csoport" msgid "Historical term" msgstr "Történelmi kifejezés" msgid "Honorific language" msgstr "Megtisztelő nyelvezet" msgid "Humble language" msgstr "Szerény nyelvezet" msgid "Idiomatic expression" msgstr "Szólás" msgid "Jocular humorous term" msgstr "Tréfás kifejezés" msgid "Legend" msgstr "Legenda" msgid "Literary/formal term" msgstr "Irodalmi/hivatalos kifejezés" msgid "Manga slang" msgstr "Manga szleng" msgid "Male term/language" msgstr "Férfi nyelvezet" msgid "Mythology" msgstr "Mitológia" msgid "Internet slang" msgstr "Internet szleng" msgid "Object" msgstr "Tárgy" msgid "Obsolete term" msgstr "Elavult kifejezés" msgid "Obscure term" msgstr "Homályos kifejezés" msgid "Onomatopoetic or mimetic word" msgstr "Hangutánzó szó" msgid "Organization name" msgstr "Szervezet 
neve" msgid "Other" msgstr "Egyéb" msgid "Person name" msgstr "Személynév" msgid "Place name" msgstr "Helynév" msgid "Poetical term" msgstr "Költői kifejezés" msgid "Polite language" msgstr "Udvarias nyelv" msgid "Product name" msgstr "Termék név" msgid "Proverb" msgstr "Közmondás" msgid "Qutation" msgstr "Idézet" msgid "Rare" msgstr "Ritka" msgid "Religion" msgstr "Vallás" msgid "Sensitive" msgstr "Érzékeny" msgid "Service" msgstr "Szolgáltatás" msgid "Slang" msgstr "Szleng" msgid "Railway station" msgstr "Vasútállomás" msgid "Family or surname" msgstr "Vezetéknév" msgid "Usually written in kana" msgstr "Általában kanával írják" msgid "Unclassified name" msgstr "Rendszerezetlen név" msgid "Vulgar expression/word" msgstr "Vulgáris kifejezés/szó" msgid "Artwork" msgstr "Műalkotás" msgid "Rude/x-rated term" msgstr "Sértő/X-besorolású kifejezés" msgid "Yojijukugo" msgstr "Yojijukugo" # Fields msgid "{} term" msgstr "{} kifejezés" msgid "Agriculture" msgstr "Mezőgazdaság" msgid "Anatomy" msgstr "Anatómia" msgid "Archeology" msgstr "Régészet" msgid "Architecture" msgstr "Építészet" msgid "Art aesthetics" msgstr "Művészet" msgid "Astronomy" msgstr "Űrtudomány" msgid "Audio/visual" msgstr "Audiovizuális" msgid "Aviation" msgstr "Repülés" msgid "Baseball" msgstr "Baseball" msgid "Biochemistry" msgstr "Biokémia" msgid "Biology" msgstr "Biológia" msgid "Botany" msgstr "Botanika" msgid "Buddhism" msgstr "Buddhizmus" msgid "Business" msgstr "Biznisz" msgid "Chemistry" msgstr "Kémia" msgid "Christianity" msgstr "Kereszténység" msgid "Computing" msgstr "Számítógépes tudomány" msgid "Clothing" msgstr "Ruházat" msgid "Crystallography" msgstr "Kristálytan" msgid "Ecology" msgstr "Ökológia" msgid "Economics" msgstr "Ökonómia" msgid "Electricity" msgstr "Elektromosság" msgid "Electronics" msgstr "Elektronika" msgid "Embryology" msgstr "Embriológia" msgid "Engineering" msgstr "Gépészet" msgid "Entomology" msgstr "Entomológia" msgid "Finance" msgstr "Pénzügytan" msgid "Fishing" msgstr 
"Horgászat" msgid "FoodCooking" msgstr "Főzés" msgid "Gardening" msgstr "Kertészkedés" msgid "Genetics" msgstr "Genetika" msgid "Geography" msgstr "Földrajz" msgid "Geology" msgstr "Geológia" msgid "Geometry" msgstr "Geometria" msgid "Go (game)" msgstr "Go (játék)" msgid "Golf" msgstr "Golf" msgid "Grammar" msgstr "Nyelvtan" msgid "Greek mythology" msgstr "Görög mitológia" msgid "Hanafuda" msgstr "Hanafuda" msgid "Horseracing" msgstr "Lóverseny" msgid "Law" msgstr "Jog" msgid "Linguistics" msgstr "Nyelvészet" msgid "Logic" msgstr "Logika" msgid "Martial arts" msgstr "Harcművészet" msgid "Mahjong" msgstr "Mahjong" msgid "Mathematics" msgstr "Matematika" msgid "MechanicalEngineering" msgstr "Gépészmérnök" msgid "Medicine" msgstr "Orvostudomány" msgid "Climate/weather" msgstr "Időjárás" msgid "Military" msgstr "Katonaság" msgid "Music" msgstr "Zene" msgid "Ornithology" msgstr "Ornitológia" msgid "Paleontology" msgstr "Paleontológia" msgid "Pathology" msgstr "Patológia" msgid "Pharmacy" msgstr "Gyógyszerészet" msgid "Philosophy" msgstr "Filozófia" msgid "Photography" msgstr "Fényképezés" msgid "Physics" msgstr "Fizika" msgid "Physiology" msgstr "Fiziológia" msgid "Printing" msgstr "Nyomtatás" msgid "Psychology" msgstr "Pszichológia" msgid "Psychiatry" msgstr "Pszichiátria" msgid "Railway" msgstr "Vasút" msgid "Shinto" msgstr "Shinto" msgid "Shogi" msgstr "Shogi" msgid "Sports" msgstr "Sport" msgid "Statistics" msgstr "Statisztika" msgid "Sumo" msgstr "Szumó" msgid "Telecommunications" msgstr "Telekommunikáció" msgid "Trademark" msgstr "Védjegy" msgid "Videogame" msgstr "Videójáték" msgid "Zoology" msgstr "Zoológia" # Part of speech msgid "Godan verb" msgstr "Godan ige" msgid "Irregular verb with {} ending" msgstr "Szabálytalan ige {} végződéssel" msgid "SoundFx" msgstr "Hangeffekt" msgid "Expression" msgstr "Kifejezés" msgid "Counter" msgstr "Számlálószó" msgid "Suffix" msgstr "Utótag" msgid "Prefix" msgstr "Előtag" msgid "Particle" msgstr "Partikula" msgid
"Interjection" msgstr "Indulatszó" msgid "Symbol" msgstr "Szimbólum" msgid "Pronoun" msgstr "Névmás" msgid "Auxilary" msgstr "Segédige" msgid "Numeric" msgstr "Szám" msgid "Adverb-To" msgstr "To Határozószó" msgid "Adverb" msgstr "Határozószó" msgid "Adjective" msgstr "Melléknév" msgid "Auxilary adjective" msgstr "Segédmelléknév" msgid "Auxilary Verb" msgstr "Segédige" msgid "Verb" msgstr "Ige" msgid "Conjugation" msgstr "Ragozás" msgid "Unclassified" msgstr "Csoportosítatlan" msgid "Noun or verb describing a noun" msgstr "Főnév, vagy főnevet leíró ige" msgid "I adjective" msgstr "I melléknév" msgid "I adjective (conjugated like いい)" msgstr "I melléknév (いい-ként ragozva)" msgid "Ku adjective" msgstr "Ku melléknév" msgid "Na adjective" msgstr "Na melléknév" msgid "Formal form of na adjective" msgstr "Na melléknév hivatalos alakja" msgid "No adjective" msgstr "No melléknév" msgid "Pre noun adjective" msgstr "Főnév előtti melléknév" msgid "Shiku adjective" msgstr "Shiku melléknév" msgid "Taru adjective" msgstr "Taru melléknév" msgid "Noun" msgstr "Főnév" msgid "Noun adverbial" msgstr "Határozói Főnév" msgid "Prefix (noun)" msgstr "Előtag (főnév)" msgid "Suffix (noun)" msgstr "Utótag (főnév)" msgid "Temporal noun" msgstr "Időt leíró főnév" msgid "Unspecified verb" msgstr "Csoportosítatlan ige" msgid "Intransitive verb" msgstr "Intranzitív ige" msgid "Transitive verb" msgstr "Tranzitív ige" msgid "Ichidan verb" msgstr "Ichidan ige" msgid "Ichidan zuru verb" msgstr "Ichidan zuru ige" msgid "Ichidan kureru verb" msgstr "Ichidan kureru ige" msgid "Kuru verb" msgstr "Kuru ige" msgid "Noun taking suru" msgstr "Suru-t felvevő főnév" msgid "Suru verb" msgstr "Suru ige" msgid "Suru special" msgstr "Speciális suru" msgid "Pre-noun" msgstr "Főnév előtti" # this thingy -> " " msgid "Space" msgstr "Szóköz" ## Name search msgctxt "name_type" msgid "Company" msgstr "Cég" msgctxt "name_type" msgid "Female" msgstr "Női" msgctxt "name_type" msgid "Male" msgstr "Férfi" msgctxt 
"name_type" msgid "Organization" msgstr "Szervezet" msgctxt "name_type" msgid "Persons name" msgstr "Személy" msgctxt "name_type" msgid "Place" msgstr "Hely" msgctxt "name_type" msgid "Product" msgstr "Termék" msgctxt "name_type" msgid "(Railway)Station" msgstr "Vasútállomás" msgctxt "name_type" msgid "Surname" msgstr "Vezetéknév" msgctxt "name_type" msgid "Unknown" msgstr "Ismeretlen" msgctxt "name_type" msgid "Art work" msgstr "Műalkotás" msgctxt "name_type" msgid "Character" msgstr "Karakter" msgctxt "name_type" msgid "Deity" msgstr "Istenség" msgctxt "name_type" msgid "Document" msgstr "Dokumentum" msgctxt "name_type" msgid "Event" msgstr "Esemény" msgctxt "name_type" msgid "Fiction" msgstr "Fikció" msgctxt "name_type" msgid "Group" msgstr "Csoport" msgctxt "name_type" msgid "Legend" msgstr "Legenda" msgctxt "name_type" msgid "Mythology" msgstr "Mitológia" msgctxt "name_type" msgid "Object" msgstr "Tárgy" msgctxt "name_type" msgid "Other" msgstr "Egyéb" msgctxt "name_type" msgid "Religion" msgstr "Vallás" msgctxt "name_type" msgid "Service" msgstr "Szolgáltatás" # "No names found" msgid "names" msgstr "név" ## Search help msgid "Search Help" msgstr "Keresési segéd" msgid "No {} found" msgstr "Nincs a keresésnek megfelelő {}" msgid "Your default search language might not fit your input" msgstr "Az alapértelmezett keresési nyelved lehet, hogy nem passzol a bemenethez" msgid "Check your search for typos" msgstr "Ellenőrizd, hogy nem írtad-e el a keresési kifejezésedet" msgid "Use more generic search terms" msgstr "Használj általánosabb keresési kifejezéseket" msgid "Try finding your search in a different category using" msgstr "Próbáld a kifejezésedet egy másik kategóriában megkeresni:" msgid "Your search request might not be included in our database yet" msgstr "Lehet, hogy az adatbázisunk még nem tartalmazza a keresési kifejezésedet" msgid "If you think your search should be contained in our database, submit an issue on" msgstr "Ha úgy gondolod, hogy az 
adatbázisunknak tartalmaznia kéne a keresésedet, küldj be egy kérelmet:" # Also check our Trello board since we might be working on it msgid "Also check our" msgstr "Ellenőrizd a" msgid "since we might be working on it!" msgstr ", hátha épp dolgozunk rajta" # Paginator msgid "First" msgstr "Első" msgid "Last" msgstr "Utolsó" ================================================ FILE: rustfmt.toml ================================================ edition="2021" ================================================ FILE: scripts/gen_locales.sh ================================================ #!/bin/bash
# Compile every gettext .po catalog in ./locales to a binary .mo file,
# then move the compiled catalogs back into ./locales.
#
# Fixes over the original one-liner:
#  - iterate with a glob instead of parsing `ls | grep .po` output
#    (safe for names with spaces; the old grep pattern `.po` also
#    matched ".po" anywhere in a name, not just as the extension)
#  - quote all variable expansions
#  - `set -e` aborts on the first msgfmt failure instead of silently
#    skipping a broken .po file
set -e
for po in ./locales/*.po; do
    # ${po%.po} strips the extension; basename strips the ./locales/ prefix.
    msgfmt "$po" -o "/tmp/$(basename "${po%.po}").mo"
done
mv /tmp/*.mo ./locales